file_name (3-137 chars) | prefix (0-918k chars) | suffix (0-962k chars) | middle (0-812k chars)
---|---|---|---|
authorizer.go | /**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package authorizer
import (
"errors"
"k8s.io/apiserver/pkg/authorization/authorizer"
)
type openshiftAuthorizer struct {
delegate authorizer.Authorizer
forbiddenMessageMaker ForbiddenMessageMaker
}
func NewAuthorizer(delegate authorizer.Authorizer, forbiddenMessageMaker ForbiddenMessageMaker) authorizer.Authorizer {
return &openshiftAuthorizer{delegate: delegate, forbiddenMessageMaker: forbiddenMessageMaker}
}
| if attributes.GetUser() == nil {
return authorizer.DecisionNoOpinion, "", errors.New("no user available on context")
}
authorizationDecision, delegateReason, err := a.delegate.Authorize(attributes)
if authorizationDecision == authorizer.DecisionAllow {
return authorizer.DecisionAllow, reason(attributes), nil
}
// errors are allowed to occur; return the delegate's error as-is
if err != nil {
return authorizationDecision, "", err
}
denyReason, err := a.forbiddenMessageMaker.MakeMessage(attributes)
if err != nil {
denyReason = err.Error()
}
if len(delegateReason) > 0 {
denyReason += ": " + delegateReason
}
return authorizationDecision, denyReason, nil
}
func reason(attributes authorizer.Attributes) string {
if len(attributes.GetNamespace()) == 0 {
return "allowed by cluster rule"
}
// not 100% accurate, because the rule may have been provided by a cluster rule. we no longer have
// this distinction upstream in practice.
return "allowed by openshift authorizer"
} | func (a *openshiftAuthorizer) Authorize(attributes authorizer.Attributes) (authorizer.Decision, string, error) { |
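As a side note, the wrapping pattern in this row (call the delegate first, short-circuit on allow, otherwise decorate the denial reason) can be sketched without the k8s.io/apiserver dependency. The `Decision`, `Delegate`, and `wrapped` names below are illustrative stand-ins, not the real API:

```go
package main

import "fmt"

// Decision and Delegate are toy stand-ins for authorizer.Decision and
// authorizer.Authorizer, used only to illustrate the wrapping pattern.
type Decision int

const (
	DecisionDeny Decision = iota
	DecisionAllow
	DecisionNoOpinion
)

type Delegate func(user, verb string) (Decision, string, error)

// wrapped mirrors openshiftAuthorizer.Authorize: fast-path on allow,
// return delegate errors as-is, otherwise build a denial message and
// append the delegate's reason.
func wrapped(delegate Delegate, user, verb string) (Decision, string) {
	decision, reason, err := delegate(user, verb)
	if decision == DecisionAllow {
		return DecisionAllow, "allowed by cluster rule"
	}
	if err != nil {
		return decision, err.Error()
	}
	deny := fmt.Sprintf("User %q cannot %s", user, verb)
	if reason != "" {
		deny += ": " + reason
	}
	return decision, deny
}

func main() {
	noOpinion := func(user, verb string) (Decision, string, error) {
		return DecisionNoOpinion, "no RBAC rule matched", nil
	}
	d, msg := wrapped(noOpinion, "alice", "delete pods")
	fmt.Println(d, msg)
}
```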
mod.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! This module defines physical storage schema for any single-entry data.
//!
//! There will be only one row in this column family for each type of data.
//! The key will be a serialized enum type designating the data type; the key itself carries
//! no other meaning and should not be used as one.
//!
//! ```text
//! |<-------key------->|<-----value----->|
//! | single entry key | raw value bytes |
//! ```
use super::{ensure_slice_len_eq, SINGLE_ENTRY_CF_NAME};
use byteorder::ReadBytesExt;
use failure::prelude::*;
use num_derive::{FromPrimitive, ToPrimitive}; | use solana_libra_schemadb::{
define_schema,
schema::{KeyCodec, ValueCodec},
};
use std::mem::size_of;
define_schema!(
SingleEntrySchema,
SingleEntryKey,
Vec<u8>,
SINGLE_ENTRY_CF_NAME
);
#[derive(Debug, Eq, PartialEq, FromPrimitive, ToPrimitive)]
#[repr(u8)]
pub enum SingleEntryKey {
// Used to store ConsensusState
ConsensusState = 0,
// Used to store the highest timeout certificates
HighestTimeoutCertificates = 1,
}
impl KeyCodec<SingleEntrySchema> for SingleEntryKey {
fn encode_key(&self) -> Result<Vec<u8>> {
Ok(vec![self
.to_u8()
.ok_or_else(|| format_err!("ToPrimitive failed."))?])
}
fn decode_key(data: &[u8]) -> Result<Self> {
ensure_slice_len_eq(data, size_of::<u8>())?;
let key = (&data[..]).read_u8()?;
SingleEntryKey::from_u8(key).ok_or_else(|| format_err!("FromPrimitive failed."))
}
}
impl ValueCodec<SingleEntrySchema> for Vec<u8> {
fn encode_value(&self) -> Result<Vec<u8>> {
Ok(self.clone())
}
fn decode_value(data: &[u8]) -> Result<Self> {
Ok(data.to_vec())
}
}
#[cfg(test)]
mod test; | use num_traits::{FromPrimitive, ToPrimitive}; |
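The codec in this row stores each key as exactly one byte and validates both length and variant on decode. Below is a minimal sketch of the same single-byte key codec in Go, with illustrative names (not part of any Libra/schemadb API):

```go
package main

import (
	"errors"
	"fmt"
)

// SingleEntryKey mirrors the Rust enum above: each key variant is one byte.
type SingleEntryKey uint8

const (
	ConsensusState             SingleEntryKey = 0
	HighestTimeoutCertificates SingleEntryKey = 1
)

// EncodeKey serializes the key as exactly one byte, like encode_key.
func (k SingleEntryKey) EncodeKey() []byte { return []byte{byte(k)} }

// DecodeKey enforces the one-byte length (cf. ensure_slice_len_eq) and
// rejects unknown variants (cf. from_u8 returning None).
func DecodeKey(data []byte) (SingleEntryKey, error) {
	if len(data) != 1 {
		return 0, errors.New("key must be exactly 1 byte")
	}
	k := SingleEntryKey(data[0])
	if k > HighestTimeoutCertificates {
		return 0, errors.New("unknown key variant")
	}
	return k, nil
}

func main() {
	b := HighestTimeoutCertificates.EncodeKey()
	k, err := DecodeKey(b)
	fmt.Println(b, k, err) // [1] 1 <nil>
}
```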
vuicash_condensing_txs.py | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.vuicash import *
from test_framework.vuicashconfig import *
import sys
class CondensingTxsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [['-txindex=1', '-rpcmaxgasprice=10000000']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
# verify that the state hash is not 0 on genesis
def setup_contracts(self):
"""
pragma solidity ^0.4.0;
contract Sender1 {
// Sender2 sender2;
// Sender3 sender3;
address public sender2;
address public sender3;
function Sender1() {
}
function setSenders(address senderx, address sendery) public{
// sender2=Sender2(senderx);
// sender3=Sender3(sendery);
sender2 = senderx;
sender3 = sendery;
}
function share() public payable{
if(msg.sender != address(sender3)){
// sender2.share.value(msg.value/2);
sender2.call.value(msg.value/2)(bytes4(sha3("share()")));
}
}
function sendAll() public payable{
// sender2.keep.value(msg.value + this.balance);
// sender2.call.value(msg.value + this.balance)(bytes4(sha3("keep()")));
sender2.call.value(this.balance)(bytes4(sha3("keep()")));
}
function keep() public payable{
}
function() payable { } //always payable
}
contract Sender2{
// Sender1 sender1;
// Sender3 sender3;
address public sender1;
address public sender3;
function Sender2() {
}
function setSenders(address senderx, address sendery) public{
// sender1=Sender1(senderx);
// sender3=Sender3(sendery);
sender1 = senderx;
sender3 = sendery;
}
function share() public payable{
// sender3.share.value(msg.value/2);
sender3.call.value(msg.value/2)(bytes4(sha3("share()")));
}
function keep() public payable{
}
function withdrawAll() public{
// sender3.withdraw();
sender3.call(bytes4(sha3("withdraw()")));
msg.sender.send(this.balance);
}
function() payable { } //always payable
}
contract Sender3 {
// Sender1 sender1;
// Sender2 sender2;
address public sender1;
address public sender2;
function Sender3() {
}
function setSenders(address senderx, address sendery) public{
// sender1=Sender1(senderx);
// sender2=Sender2(sendery);
sender1 = senderx;
sender2 = sendery;
}
function share() public payable{
// sender1.share.value(msg.value/2);
// sender2.keep.value(msg.value/4);
sender1.call.value(msg.value/2)(bytes4(sha3("share()")));
sender2.call.value(msg.value/4)(bytes4(sha3("keep()")));
}
function withdraw() public{
msg.sender.send(this.balance);
}
function() payable { } //always payable
}
"""
sender1_bytecode = "6060604052341561000c57fe5b5b5b5b6104cb8061001e6000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635579818d1461007f578063622836a3146100d45780639b0079d414610126578063a8d5fd6514610178578063e14f680f14610182578063e4d06d821461018c575b61007d5b5b565b005b341561008757fe5b6100d2600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610196565b005b34156100dc57fe5b6100e461021d565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561012e57fe5b610136610243565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610180610269565b005b61018a6103a9565b005b61019461049c565b005b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156103a657600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660023481151561030557fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b5b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163073ffffffffffffffffffffffffffffffffffffffff163160405180807f6b65657028290000000000000000000000000000000000000000000000000000815250600601905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b5b5600a165627a7a72305820b491c90fc7b4f09ab3f6262b83707908d390a97f9730429d1ff5fa8e44a63b190029"
self.sender1 = self.node.createcontract(sender1_bytecode, 1000000)['address']
sender2_bytecode = "6060604052341561000c57fe5b5b5b5b6104b28061001e6000396000f30060606040523615610076576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680635579818d1461007f578063853828b6146100d45780639b0079d4146100e6578063a8d5fd6514610138578063e4d06d8214610142578063f34e0e7b1461014c575b61007d5b5b565b005b341561008757fe5b6100d2600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061019e565b005b34156100dc57fe5b6100e4610225565b005b34156100ee57fe5b6100f661034f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610140610375565b005b61014a61045d565b005b341561015457fe5b61015c610460565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1660405180807f7769746864726177282900000000000000000000000000000000000000000000815250600a01905060405180910390207c010000000000000000000000000000000000000000000000000000000090046040518163ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038160008761646e5a03f192505050503373ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051809050600060405180830381858888f19350505050505b565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166002348115156103ba57fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b5b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16815600a165627a7a723058201842d5027fea2d624a38de6731e71832836efe8c51e5815b8ad85b7f3639e72a0029"
self.sender2 = self.node.createcontract(sender2_bytecode, 1000000)['address']
sender3_bytecode = "6060604052341561000c57fe5b5b5b5b6104a88061001e6000396000f3006060604052361561006b576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680633ccfd60b146100745780635579818d14610086578063622836a3146100db578063a8d5fd651461012d578063f34e0e7b14610137575b6100725b5b565b005b341561007c57fe5b610084610189565b005b341561008e57fe5b6100d9600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506101dc565b005b34156100e357fe5b6100eb610263565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610135610289565b005b341561013f57fe5b610147610456565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3373ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051809050600060405180830381858888f19350505050505b565b81600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166002348115156102ce57fe5b0460405180807f7368617265282900000000000000000000000000000000000000000000000000815250600701905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f1935050505050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166004348115156103b357fe5b0460405180807f6b65657028290000000000000000000000000000000000000000000000000000815250600601905060405180910390207c01000000000000000000000000000000000000000000000000000000009004906040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180905060006040518083038185886187965a03f19350505050505b565b600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16815600a165627a7a72305820cb1b06b481990e1e218f7d0b51a3ffdf5b7439cfdd9bb2dccc1476cb84dfc95b0029"
self.sender3 = self.node.createcontract(sender3_bytecode, 1000000)['address']
self.node.generate(1)
assert(len(self.node.listcontracts()) == 3+NUM_DEFAULT_DGP_CONTRACTS)
self.keep_abi = "e4d06d82"
self.sendAll_abi = "e14f680f"
self.setSenders_abi = "5579818d"
self.share_abi = "a8d5fd65"
self.withdrawAll_abi = "853828b6"
self.withdraw_abi = "3ccfd60b"
self.sender1_abi = "f34e0e7b"
self.sender2_abi = "622836a3"
self.sender3_abi = "9b0079d4"
padded_sender1 = self.sender1.zfill(64)
padded_sender2 = self.sender2.zfill(64)
padded_sender3 = self.sender3.zfill(64)
self.node.sendtocontract(self.sender1, self.setSenders_abi + padded_sender2 + padded_sender3)
self.node.sendtocontract(self.sender2, self.setSenders_abi + padded_sender1 + padded_sender3)
self.node.sendtocontract(self.sender3, self.setSenders_abi + padded_sender1 + padded_sender2)
self.node.generate(1)
# Verify that the senders have been set correctly
assert_equal(self.node.callcontract(self.sender1, self.sender2_abi)['executionResult']['output'][24:], self.sender2)
assert_equal(self.node.callcontract(self.sender1, self.sender3_abi)['executionResult']['output'][24:], self.sender3)
assert_equal(self.node.callcontract(self.sender2, self.sender1_abi)['executionResult']['output'][24:], self.sender1)
assert_equal(self.node.callcontract(self.sender2, self.sender3_abi)['executionResult']['output'][24:], self.sender3)
assert_equal(self.node.callcontract(self.sender3, self.sender1_abi)['executionResult']['output'][24:], self.sender1)
assert_equal(self.node.callcontract(self.sender3, self.sender2_abi)['executionResult']['output'][24:], self.sender2)
def run_test(self):
self.node = self.nodes[0]
self.node.generate(COINBASE_MATURITY+50)
print("Setting up contracts and calling setSenders")
self.setup_contracts()
A1 = self.node.getnewaddress()
self.node.sendtoaddress(A1, 1)
self.node.generate(1)
assert("vin" not in self.node.getaccountinfo(self.sender1))
assert("vin" not in self.node.getaccountinfo(self.sender2))
assert("vin" not in self.node.getaccountinfo(self.sender3))
T1_id = self.node.sendtocontract(self.sender1, self.share_abi, 8)['txid']
B2_id = self.node.generate(1)[0]
B2 = self.node.getblock(B2_id)
# Since this is a PoW block we only require 3 txs atm (coinbase, T1 and COND tx)
assert_equal(B2['tx'][1], T1_id)
assert_equal(len(B2['tx']), 3)
C1_id = B2['tx'][2]
C1 = self.node.getrawtransaction(C1_id, True)
assert_vin(C1, [('OP_SPEND', )]) |
# We set the tx fee of T2 to a higher value such that it will be prioritized (be at index 1 in the block)
T2_id = self.node.sendtocontract(self.sender1, self.keep_abi, 2, 50000, 0.0001)['txid']
T3_id = self.node.sendtocontract(self.sender1, self.sendAll_abi, 2)['txid']
B3_id = self.node.generate(1)[0]
B3 = self.node.getblock(B3_id)
# coinbase, T2, C2, T3, C3
assert_equal(len(B3['tx']), 5)
assert_equal(B3['tx'][1], T2_id)
C2_id = B3['tx'][2]
C3_id = B3['tx'][4]
C2 = self.node.getrawtransaction(C2_id, True)
C3 = self.node.getrawtransaction(C3_id, True)
assert_vin(C2, [('OP_SPEND', ), ('OP_SPEND', )])
assert_vout(C2, [(7, 'call')])
assert_vin(C3, [('OP_SPEND', ), ('OP_SPEND', ), ('OP_SPEND', )])
assert_vout(C3, [(11.5, 'call')])
assert("vin" not in self.node.getaccountinfo(self.sender1))
assert("vin" in self.node.getaccountinfo(self.sender2))
assert("vin" in self.node.getaccountinfo(self.sender3))
# We need the tx fee to be higher than T5's so that T4 is prioritized over T5.
# We set the gas such that the tx will run but not immediately throw an out-of-gas exception
T4_raw = make_transaction(self.node, [make_vin(self.node, 3*COIN)], [make_op_call_output(2*COIN, b"\x04", 22000, CScriptNum(VUI_MIN_GAS_PRICE), hex_str_to_bytes(self.share_abi), hex_str_to_bytes(self.sender2))])
T4_id = self.node.sendrawtransaction(T4_raw, 0)
T5_id = self.node.sendtocontract(self.sender2, self.withdrawAll_abi, 0, 1000000, VUI_MIN_GAS_PRICE_STR, A1)['txid']
B4_id = self.node.generate(1)[0]
B4 = self.node.getblock(B4_id)
# Coinbase, T4, R1, T5, C4
assert_equal(len(B4['tx']), 5)
assert_equal(B4['tx'][1], T4_id)
assert_equal(B4['tx'][3], T5_id)
R1_id = B4['tx'][2]
R1 = self.node.getrawtransaction(R1_id, True)
C4_id = B4['tx'][4]
C4 = self.node.getrawtransaction(C4_id, True)
assert_vout(R1, [(2, 'pubkeyhash')])
assert_vin(C4, [('OP_SPEND', ), ('OP_SPEND', )])
assert_vout(C4, [(12, 'pubkeyhash')])
assert_equal(sum(self.node.listcontracts().values()), 0)
assert("vin" not in self.node.getaccountinfo(self.sender1))
assert("vin" not in self.node.getaccountinfo(self.sender2))
assert("vin" not in self.node.getaccountinfo(self.sender3))
if __name__ == '__main__':
CondensingTxsTest().main() | assert_vout(C1, [(5, 'call'), (2.5, 'call'), (0.5, 'call')])
assert("vin" in self.node.getaccountinfo(self.sender1))
assert("vin" in self.node.getaccountinfo(self.sender2))
assert("vin" in self.node.getaccountinfo(self.sender3)) |
crypt.go | package secure
import (
"bytes"
"crypto/aes"
"crypto/cipher"
)
func EncryptIV(decrypted []byte, key []byte, iv []byte) ([]byte, error) {
ac, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
c := cipher.NewCBCEncrypter(ac, iv) | }
func DecryptIV(encrypted []byte, key []byte, iv []byte) ([]byte, error) {
ac, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
c := cipher.NewCBCDecrypter(ac, iv)
decrypted := make([]byte, len(encrypted))
c.CryptBlocks(decrypted, encrypted)
decrypted = PKCS5UnPadding(decrypted)
return decrypted, nil
}
func PKCS5Padding(src []byte, blockSize int) []byte {
padding := blockSize - len(src)%blockSize
padtext := bytes.Repeat([]byte{byte(padding)}, padding)
return append(src, padtext...)
}
func PKCS5UnPadding(src []byte) []byte {
length := len(src)
unpadding := int(src[length-1])
return src[:(length - unpadding)]
} | decrypted = PKCS5Padding(decrypted, ac.BlockSize())
encrypted := make([]byte, len(decrypted))
c.CryptBlocks(encrypted, decrypted)
return encrypted, nil |
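A round-trip check for the `EncryptIV`/`DecryptIV` helpers above; this sketch assumes it lives in the same `secure` package (e.g. a hypothetical crypt_test.go). Note that the helpers do not validate that the ciphertext length is a multiple of the AES block size, and `PKCS5UnPadding` trusts the final byte, so callers must guarantee well-formed input:

```go
package secure

import (
	"bytes"
	"testing"
)

// TestEncryptDecryptIV round-trips a short message through the helpers
// above. AES requires a 16/24/32-byte key and an IV of exactly one
// block (16 bytes); PKCS5 padding handles the plaintext length.
func TestEncryptDecryptIV(t *testing.T) {
	key := []byte("0123456789abcdef") // 16 bytes -> AES-128
	iv := []byte("fedcba9876543210")  // aes.BlockSize bytes
	plain := []byte("attack at dawn")

	enc, err := EncryptIV(plain, key, iv)
	if err != nil {
		t.Fatal(err)
	}
	dec, err := DecryptIV(enc, key, iv)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(dec, plain) {
		t.Fatalf("round trip mismatch: %q != %q", dec, plain)
	}
}
```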
forms.rs | //! Erlang top-level components such as attributes, directives or declarations.
use crate::format::{Format, Formatter, Indent, Newline};
use crate::items::atoms::{
CallbackAtom, DefineAtom, IncludeAtom, IncludeLibAtom, OpaqueAtom, RecordAtom, SpecAtom,
TypeAtom,
};
use crate::items::components::{
Clauses, CommaDelimiter, Either, Element, Maybe, Never, NonEmptyItems, Null, Params,
Parenthesized, TupleLike, WithArrow, WithGuard,
};
use crate::items::expressions::components::FunctionClause;
use crate::items::keywords::IfKeyword;
use crate::items::macros::{MacroName, MacroReplacement};
use crate::items::symbols::{
CloseParenSymbol, ColonSymbol, CommaSymbol, DotSymbol, DoubleColonSymbol, HyphenSymbol,
MatchSymbol, OpenParenSymbol,
};
use crate::items::tokens::{AtomToken, LexicalToken, StringToken, VariableToken};
use crate::items::Expr;
use crate::items::Type;
use crate::parse::Parse;
use crate::span::Span;
use std::path::{Path, PathBuf};
#[derive(Debug, Clone, Span, Parse, Format)]
pub(super) enum Form {
Define(DefineDirective),
Include(IncludeDirective),
FunSpec(FunSpec),
FunDecl(FunDecl),
TypeDecl(TypeDecl),
RecordDecl(RecordDecl),
Attr(Attr),
}
/// `-` `record` `(` `$NAME` `,` `{` `$FIELD`* `}` `)` `.`
///
/// - $NAME: [AtomToken]
/// - $FIELD: [AtomToken] (`=` [Expr])? (`::` [Type])? `,`?
#[derive(Debug, Clone, Span, Parse, Format)]
pub struct RecordDecl(AttrLike<RecordAtom, RecordDeclValue>);
#[derive(Debug, Clone, Span, Parse)]
struct RecordDeclValue {
name: AtomToken,
comma: CommaSymbol,
fields: TupleLike<RecordField>,
}
impl Format for RecordDeclValue {
fn format(&self, fmt: &mut Formatter) {
fmt.subregion(Indent::CurrentColumn, Newline::Never, |fmt| {
self.name.format(fmt);
self.comma.format(fmt);
fmt.add_space();
fmt.subregion(Indent::inherit(), Newline::IfTooLongOrMultiLine, |fmt| {
self.fields.format(fmt)
});
});
}
}
#[derive(Debug, Clone, Span, Parse, Element)]
struct RecordField {
name: AtomToken,
default: Maybe<(MatchSymbol, Expr)>,
r#type: Maybe<(DoubleColonSymbol, Type)>,
}
impl Format for RecordField {
fn format(&self, fmt: &mut Formatter) {
self.name.format(fmt);
if let Some((x, y)) = self.default.get() {
fmt.add_space();
x.format(fmt);
fmt.add_space();
y.format(fmt);
}
if let Some((x, y)) = self.r#type.get() {
fmt.add_space();
x.format(fmt);
fmt.add_space();
y.format(fmt);
}
}
}
/// `-` (`type` | `opaque`) `$NAME` `(` (`$PARAM` `,`?)* `)` `::` `$TYPE` `.`
///
/// - $NAME: [AtomToken]
/// - $PARAM: [VariableToken]
/// - $TYPE: [Type]
///
/// Note that the parenthesized notation like `-type(foo() :: bar()).` is also acceptable
#[derive(Debug, Clone, Span, Parse, Format)]
pub struct TypeDecl(AttrLike<TypeDeclName, TypeDeclItem>);
type TypeDeclName = Either<TypeAtom, OpaqueAtom>;
#[derive(Debug, Clone, Span, Parse)]
struct TypeDeclItem {
name: AtomToken,
params: Params<VariableToken>,
delimiter: DoubleColonSymbol,
r#type: Type,
}
impl Format for TypeDeclItem {
fn format(&self, fmt: &mut Formatter) {
fmt.subregion(Indent::CurrentColumn, Newline::Never, |fmt| {
self.name.format(fmt);
self.params.format(fmt);
fmt.add_space();
self.delimiter.format(fmt);
fmt.add_space();
fmt.subregion(Indent::Offset(2), Newline::IfTooLong, |fmt| {
self.r#type.format(fmt)
});
});
}
}
/// `-` (`spec` | `callback`) `$NAME` (`(` (`$PARAM` `,`?)* `)` `->` `$RETURN` `;`?)+ `.`
///
/// - $NAME: ([AtomToken] `:`)? [AtomToken]
/// - $PARAM: [Type]
/// - $RETURN: [Type]
///
/// Note that the parenthesized notation like `-spec(foo() -> bar()).` is also acceptable
#[derive(Debug, Clone, Span, Parse, Format)]
pub struct FunSpec(AttrLike<FunSpecName, FunSpecItem>);
type FunSpecName = Either<SpecAtom, CallbackAtom>;
#[derive(Debug, Clone, Span, Parse)]
struct FunSpecItem {
module_name: Maybe<(AtomToken, ColonSymbol)>,
function_name: AtomToken,
clauses: Clauses<SpecClause>,
}
impl Format for FunSpecItem {
fn format(&self, fmt: &mut Formatter) {
fmt.subregion(Indent::CurrentColumn, Newline::Never, |fmt| {
self.module_name.format(fmt);
self.function_name.format(fmt);
self.clauses.format(fmt);
});
}
}
#[derive(Debug, Clone, Span, Parse)]
struct SpecClause {
params: WithArrow<Params<Type>>,
r#return: WithGuard<Type, Type, CommaDelimiter>,
}
impl Format for SpecClause {
fn format(&self, fmt: &mut Formatter) {
self.params.format(fmt);
fmt.subregion(
Indent::ParentOffset(4),
Newline::IfTooLongOrMultiLine,
|fmt| self.r#return.format(fmt),
);
}
}
/// (`$NAME` `(` (`$PARAM` `,`?)* `)` (`when` `$GUARD`)? `->` `$BODY` `;`?)+ `.`
///
/// - $NAME: [AtomToken]
/// - $PARAM: [Expr]
/// - $GUARD: ([Expr] (`,` | `;`)?)+
/// - $BODY: ([Expr] `,`?)+
#[derive(Debug, Clone, Span, Parse, Format)]
pub struct FunDecl {
clauses: Clauses<FunctionClause<AtomToken>>,
dot: DotSymbol,
}
/// `-` `$NAME` `$ARGS`? `.`
///
/// - $NAME: [AtomToken] | `if`
/// - $ARGS: `(` (`$ARG` `,`?)* `)`
/// - $ARG: [Expr]
#[derive(Debug, Clone, Span, Parse, Format)]
pub struct Attr(AttrLike<AttrName, AttrValue, Null>);
type AttrName = Either<AtomToken, IfKeyword>;
type AttrValue = NonEmptyItems<Expr>;
#[derive(Debug, Clone, Span, Parse)]
struct AttrLike<Name, Value, Empty = Never> {
hyphen: HyphenSymbol,
name: Name,
value: Either<Parenthesized<Value>, Either<Value, Empty>>,
dot: DotSymbol,
}
impl<Name: Format, Value: Format, Empty: Format> Format for AttrLike<Name, Value, Empty> {
fn format(&self, fmt: &mut Formatter) {
self.hyphen.format(fmt);
self.name.format(fmt);
if matches!(self.value, Either::B(Either::A(_))) {
fmt.add_space();
}
self.value.format(fmt);
self.dot.format(fmt);
}
}
/// `-` `define` `(` `$NAME` `$VARS`? `,` `REPLACEMENT`* `)` `.`
///
/// - $NAME: [AtomToken] | [VariableToken]
/// - $VARS: `(` ([VariableToken] `,`?)* `)`
/// - $REPLACEMENT: [LexicalToken]
#[derive(Debug, Clone, Span, Parse)]
pub struct DefineDirective {
hyphen: HyphenSymbol,
define: DefineAtom,
open: OpenParenSymbol,
macro_name: MacroName,
variables: Maybe<Params<VariableToken>>,
comma: CommaSymbol,
replacement: MacroReplacement,
close: CloseParenSymbol,
dot: DotSymbol,
}
impl DefineDirective {
pub fn macro_name(&self) -> &str {
self.macro_name.value()
}
pub fn variables(&self) -> Option<&[VariableToken]> {
self.variables.get().map(|x| x.get())
}
pub fn replacement(&self) -> &[LexicalToken] {
self.replacement.tokens()
}
}
impl Format for DefineDirective {
fn format(&self, fmt: &mut Formatter) {
self.hyphen.format(fmt);
self.define.format(fmt);
self.open.format(fmt);
fmt.subregion(Indent::CurrentColumn, Newline::Never, |fmt| {
self.macro_name.format(fmt);
self.variables.format(fmt);
self.comma.format(fmt);
fmt.add_space();
fmt.subregion(Indent::inherit(), Newline::IfTooLongOrMultiLine, |fmt| {
self.replacement.format(fmt)
});
});
self.close.format(fmt);
self.dot.format(fmt);
}
}
/// `-` (`include` | `include_lib`) `(` `$PATH` `)` `.`
///
/// - $PATH: [StringToken]
#[derive(Debug, Clone, Span, Parse, Format)]
pub struct IncludeDirective {
hyphen: HyphenSymbol,
include: Either<IncludeAtom, IncludeLibAtom>,
open: OpenParenSymbol,
file: StringToken,
close: CloseParenSymbol,
dot: DotSymbol,
}
impl IncludeDirective {
pub fn path(&self) -> &str {
self.file.value()
}
pub fn var_substituted_path(&self) -> PathBuf {
let path_str = self.file.value();
let path: &Path = path_str.as_ref();
if !path_str.starts_with('$') {
return path.to_path_buf();
}
let mut expanded_path = PathBuf::new();
for (i, c) in path.components().enumerate() {
if i == 0 {
if let Some(expanded) = c
.as_os_str()
.to_str()
.and_then(|name| std::env::var(name.split_at(1).1).ok())
{
expanded_path.push(expanded);
continue;
}
}
expanded_path.push(c);
}
expanded_path
}
pub fn resolved_path(&self, include_dirs: &[PathBuf]) -> Option<PathBuf> {
let path = self.var_substituted_path();
if matches!(self.include, Either::B(_)) && path.components().count() > 1 {
let app_name = if let std::path::Component::Normal(name) = path.components().next()? {
name.to_str()?
} else {
return None;
};
match crate::erl::code_lib_dir(app_name) {
Err(e) => {
log::warn!("{}", e);
None
}
Ok(mut resolved_path) => {
resolved_path.extend(path.components().skip(1));
log::debug!("Resolved include path: {:?}", resolved_path);
Some(resolved_path)
}
}
} else if path.exists() {
log::debug!("Resolved include path: {:?}", path);
Some(path)
} else {
for dir in include_dirs {
let candidate_path = dir.join(&path);
if candidate_path.exists() {
log::debug!("Resolved include path: {:?}", candidate_path);
return Some(candidate_path);
}
}
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn define_directive_works() {
let texts = [
"-define(FOO, ).",
"-define(bar, 1 + 2).",
indoc::indoc! {"
-define(Baz(A, B),
A), { B)."},
indoc::indoc! {"
-define(name,
begin
hello,
world
end)."},
indoc::indoc! {"
-define(foo(A,
B,
C,
D,
E), F)."},
];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn include_directive_works() {
let texts = [
r#"-include("path/to/hrl")."#,
r#"-include_lib("path/to/hrl")."#,
];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn other_directive_works() {
let texts = [
"-ifdef(foo).",
"-ifndef(foo).",
"-undef(foo).", | ];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn attr_works() {
let texts = [
"-export([foo/0]).",
indoc::indoc! {"
-export([foo/0,
bar/1])."},
indoc::indoc! {"
-import(foo,
[bar/0,
baz/1])."},
indoc::indoc! {"
-dialyzer({[no_return,
no_match],
[g/0,
h/0]})."},
indoc::indoc! {"
-export [foo/0,
bar/1]."},
];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn record_decl_works() {
let texts = [
"-record(foo, {}).",
"-record(foo, {foo}).",
indoc::indoc! {"
-record(foo,
{foo, bar})."},
indoc::indoc! {"
-record(rec,
{field1 = [] :: Type1,
field2,
field3 = 421})."},
];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn fun_decl_works() {
let texts = [
indoc::indoc! {"
foo() ->
bar."},
indoc::indoc! {"
foo(A, {B, [C]}) ->
bar;
foo(_, _) ->
baz."},
indoc::indoc! {"
%---10---|%---20---|
foo(A)
when a, b; c ->
d."},
indoc::indoc! {"
foo(A)
when is_atom(A) ->
bar,
baz;
foo(_) ->
qux."},
];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn fun_spec_works() {
let texts = [
indoc::indoc! {"
-spec foo(X) -> X;
(Y) -> Y."},
indoc::indoc! {"
%---10---|%---20---|
-spec foo(A, B) ->
C;
(T0, T1) ->
T2;
(XXX,
YYY) -> Z."},
indoc::indoc! {"
-spec id(X) ->
X when X :: tuple()."},
indoc::indoc! {"
-spec id(X) ->
X when is_subtype(X,
atom()),
X :: atom()."},
indoc::indoc! {"
-callback foobar(atom()) ->
{atom(),
atom()}."},
indoc::indoc! {"
%---10---|%---20---|
-spec foobar(A) ->
{atom(),
atom()}
when A :: atom();
(a) ->
b."},
indoc::indoc! {"
-spec foo:bar() ->
baz()."},
indoc::indoc! {"
-spec(foo:bar() ->
baz())."},
];
for text in texts {
crate::assert_format!(text, Form);
}
}
#[test]
fn type_decl_works() {
let texts = [
"-type foo() :: a.",
"-type(foo() :: a).",
indoc::indoc! {"
%---10---|%---20---|
-type foo() :: bar |
baz."},
indoc::indoc! {"
%---10---|%---20---|
-type foo() ::
barr | bazz."},
indoc::indoc! {"
-type height() ::
pos_integer()."},
indoc::indoc! {"
-opaque orddict(Key,
Val) ::
[{Key,
Val}]."},
];
for text in texts {
crate::assert_format!(text, Form);
}
}
} | "-else.",
"-endif.",
"-if(true).",
"-elif(true).", |
id_test.go | // Copyright 2021 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func | (t *testing.T) {
tid := NewTransientID()
assert.Equal(t, TRANSIENT_TABLE_START_ID, tid.TableID)
assert.Equal(t, true, tid.IsTransient())
newTid, _, err := ParseTBlkName(fmt.Sprintf("%d_0_0_0", TRANSIENT_TABLE_START_ID))
assert.Nil(t, err)
assert.Equal(t, true, tid.IsSameBlock(newTid))
assert.Equal(t, tid.PartID, newTid.PartID)
assert.Equal(t, uint32(0), tid.NextPart().PartID)
sid0 := ID{
TableID: 0,
SegmentID: 0,
}
assert.Equal(t, false, sid0.IsTransient())
sid1 := ID{
TableID: 0,
SegmentID: 1,
}
sid00 := ID{
TableID: 0,
SegmentID: 0,
}
assert.Equal(t, "RelationName<0:0-0-0-0-0>", sid0.String())
assert.Equal(t, false, sid0.IsSameSegment(sid1))
assert.Equal(t, true, sid0.IsSameSegment(sid00))
sid00.NextSegment()
assert.Equal(t, uint64(1), sid00.NextSegment().SegmentID)
bid0 := ID{
TableID: 0,
SegmentID: 0,
BlockID: 0,
}
bid1 := ID{
TableID: 0,
SegmentID: 0,
BlockID: 1,
}
assert.Equal(t, false, bid0.IsSameBlock(bid1))
assert.Equal(t, "RelationName<0>", bid0.TableString())
assert.Equal(t, "RelationName<0:0-0>", bid0.SegmentString())
assert.Equal(t, "RelationName<0:0-0-0>", bid0.BlockString())
assert.Equal(t, uint64(0), bid0.AsSegmentID().SegmentID)
assert.Equal(t, uint64(0), bid0.AsBlockID().BlockID)
assert.Equal(t, uint64(0), bid0.NextBlock().BlockID)
bid0.Next()
assert.Equal(t, uint64(1), bid0.Next().TableID)
bid0.NextIter()
assert.Equal(t, uint8(1), bid0.NextIter().Iter)
assert.Equal(t, uint8(0), bid0.Iter)
assert.Equal(t, "0_2_0_1_0", bid0.ToPartFileName())
assert.Equal(t, "2/0/1/0/0.0", bid0.ToPartFilePath())
assert.Equal(t, "2_0_1", bid0.ToBlockFileName())
assert.Equal(t, "2/0/1/", bid0.ToBlockFilePath())
assert.Equal(t, "2_0", bid0.ToSegmentFileName())
assert.Equal(t, "2/0/", bid0.ToSegmentFilePath())
_, _, err = ParseTBlkName("0_0_0")
assert.NotNil(t, err)
_, _, err = ParseTBlkName("a_0_0_0")
assert.NotNil(t, err)
_, _, err = ParseTBlkName("0_a_0_0")
assert.NotNil(t, err)
_, _, err = ParseTBlkName("0_0_a_0")
assert.NotNil(t, err)
_, _, err = ParseTBlkName("0_0_0_a")
assert.Nil(t, err)
_, err = ParseBlkNameToID("0_0")
assert.NotNil(t, err)
_, err = ParseBlkNameToID("a_0_0")
assert.NotNil(t, err)
_, err = ParseBlkNameToID("0_a_0")
assert.NotNil(t, err)
_, err = ParseBlkNameToID("0_0_a")
assert.NotNil(t, err)
_, err = ParseBlkNameToID("0_0_0")
assert.Nil(t, err)
_, err = ParseSegmentNameToID("0")
assert.NotNil(t, err)
_, err = ParseSegmentNameToID("a_0")
assert.NotNil(t, err)
_, err = ParseSegmentNameToID("0_a")
assert.NotNil(t, err)
_, err = ParseSegmentNameToID("0_0")
assert.Nil(t, err)
}
| TestId |
roundtrip.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !js || !wasm
// +build !js !wasm
package req
import "net/http"
// RoundTrip implements the RoundTripper interface.
//
// For higher-level HTTP client support (such as handling of cookies
// and redirects), see Get, Post, and the Client type.
// | resp, err = t.roundTrip(req)
if err != nil {
return
}
t.handleResponseBody(resp, req)
return
} | // Like the RoundTripper interface, the error types returned
// by RoundTrip are unspecified.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { |
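The `Transport.RoundTrip` in this row delegates to an inner round trip and then post-processes the response body. The same wrap-and-post-process pattern can be shown with only net/http; `loggingTransport` below is an illustrative stand-in, not the req library's API:

```go
package main

import (
	"fmt"
	"net/http"
)

// loggingTransport wraps an inner RoundTripper and post-processes the
// response, analogous to handleResponseBody above.
type loggingTransport struct {
	inner http.RoundTripper
}

func (t *loggingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := t.inner.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	fmt.Printf("%s %s -> %d\n", req.Method, req.URL, resp.StatusCode)
	return resp, nil
}

func main() {
	client := &http.Client{Transport: &loggingTransport{inner: http.DefaultTransport}}
	resp, err := client.Get("https://example.com")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```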
_pandas.py | """
We want to simplify the operations for pandas dataframes assuming we are using timeseries as the main objects.
When we have multiple timeseries, we will:
1) calculate joint index using df_index()
2) reindex each timeseries to the joint index
We then need to worry about multiple columns, if there are any. If none, each timeseries will be treated as a pd.Series.
If there are multiple columns, we will perform the calculations column by column.
"""
from pyg_base._types import is_df, is_str, is_num, is_tss, is_int, is_arr, is_ts, is_arrs, is_tuples, is_pd
from pyg_base._dictable import dictable
from pyg_base._as_list import as_list
from pyg_base._zip import zipper
from pyg_base._reducer import reducing, reducer
from pyg_base._decorators import wrapper
from pyg_base._loop import loop
from pyg_base._dates import dt
import pandas as pd
import numpy as np
from copy import copy
import inspect
import datetime
from operator import add, mul
__all__ = ['df_fillna', 'df_index', 'df_reindex', 'df_columns', 'presync', 'np_reindex', 'nona', 'df_slice', 'df_unslice', 'min_', 'max_', 'add_', 'mul_', 'sub_', 'div_', 'pow_']
def _list(values):
"""
>>> assert _list([1,2,[3,4,5,[6,7]],dict(a =[8,9], b=[10,[11,12]])]) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
>>> assert _list(1) == [1]
>>> assert _list(dict(a=1, b=2)) == [1,2]
"""
if isinstance(values, list):
return sum([_list(df) for df in values], [])
elif isinstance(values, dict):
return _list(list(values.values()))
else:
return [values]
@loop(list, tuple, dict)
def _index(ts):
if isinstance(ts, pd.Index):
return ts
elif is_pd(ts):
return ts.index
elif is_arr(ts):
return len(ts)
else:
raise ValueError('did not provide an index')
def _df_index(indexes, index):
if len(indexes) > 0:
if is_str(index):
if index[0].lower() == 'i':#nner
return reducing('intersection')(indexes)
elif index[0].lower() == 'o':#uter
return reducing('union')(indexes)
elif index[0].lower() == 'l':#eft
return indexes[0]
elif index[0].lower() == 'r':#ight
return indexes[-1]
else:
return _index(index)
else:
return None
def _np_index(indexes, index):
if len(indexes) > 0:
if index[0].lower() == 'i':#nner
return min(indexes)
elif index[0].lower() == 'o':#uter
return max(indexes)
elif index[0].lower() == 'l':#eft
return indexes[0]
elif index[0].lower() == 'r':#ight
return indexes[-1]
else:
return None
def df_index(seq, index = 'inner'):
"""
Determines a joint index of multiple timeseries objects.
:Parameters:
----------------
seq : sequence whose index needs to be determined
a (possible nested) sequence of timeseries/non-timeseries object within lists/dicts
index : str, optional
method to determine the index. The default is 'inner'.
:Returns:
-------
pd.Index
The joint index.
:Example:
---------
>>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]
>>> more_tss_as_dict = dict(zip('abcde',[pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]))
>>> res = df_index(tss + [more_tss_as_dict], 'inner')
>>> assert len(res) == 6
>>> res = df_index(more_tss_as_dict, 'outer')
>>> assert len(res) == 14
"""
listed = _list(seq)
indexes = [ts.index for ts in listed if is_pd(ts)]
if len(indexes):
return _df_index(indexes, index)
arrs = [len(ts) for ts in listed if is_arr(ts)]
if len(arrs):
return _np_index(arrs, index)
else:
return None
def df_columns(seq, index = 'inner'):
"""
returns the columns of the joint object
:Example:
---------
>>> a = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('abcde'))
>>> b = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('bcdef'))
>>> assert list(df_columns([a,b])) == list('bcde')
>>> assert list(df_columns([a,b], 'oj')) == list('abcdef')
>>> assert list(df_columns([a,b], 'lj')) == list('abcde')
>>> assert list(df_columns([a,b], 'rj')) == list('bcdef')
:Parameters:
----------
seq : sequence of dataframes
DESCRIPTION.
index : str, optional
how to inner-join. The default is 'inner'.
:Returns:
-------
pd.Index
list of columns.
"""
listed = _list(seq)
indexes= [ts.columns for ts in listed if is_df(ts) and ts.shape[1]>1 and len(set(ts.columns)) == ts.shape[1]] #dataframes with non-unique columns are treated like arrays
if len(indexes):
return _df_index(indexes, index)
arrs = [ts.shape[1] for ts in listed if (is_arr(ts) or is_df(ts)) and len(ts.shape)>1 and ts.shape[1]>1]
if len(arrs):
return _np_index(arrs, index)
return None
@loop(list, tuple, dict)
def _df_fillna(df, method = None, axis = 0, limit = None):
methods = as_list(method)
if len(methods) == 0:
return df
if is_arr(df):
return df_fillna(pd.DataFrame(df) if len(df.shape)==2 else pd.Series(df), method, axis, limit).values
res = df
for m in methods:
if is_num(m):
res = res.fillna(value = m, axis = axis, limit = limit)
elif m in ['backfill', 'bfill', 'pad', 'ffill']:
res = res.fillna(method = m, axis = axis, limit = limit)
elif m in ('fnna', 'nona'):
nonan = ~np.isnan(res)
if len(res.shape)==2:
nonan = nonan.max(axis=1)
if m == 'fnna':
nonan = nonan[nonan.values]
if len(nonan):
res = res[nonan.index[0]:]
else:
res = res.iloc[:0]
elif m == 'nona':
res = res[nonan.values]
else:
if is_num(limit) and limit<0:
res = res.interpolate(method = m, axis = axis, limit = abs(limit),
limit_direction = 'backward')
else:
res = res.interpolate(method = m, axis = axis, limit = limit)
return res
def df_fillna(df, method = None, axis = 0, limit = None):
"""
Equivalent to df.fillna() except:
- supports np.ndarray as well as dataframes
- supports multiple methods of filling/interpolation
- supports removal of nans from the start/all of the timeseries
- supports action on multiple timeseries
:Parameters:
----------------
df : dataframe/numpy array
method : string, list of strings or None, optional
Either a fill method (bfill, ffill, pad)
Or an interpolation method: 'linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', 'spline', 'polynomial', 'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', 'cubicspline'
Or 'fnna': removes everything up to the first non-nan
Or 'nona': removes all nans
axis : int, optional
axis. The default is 0.
limit : TYPE, optional
when filling, how many nan get filled. The default is None (indefinite)
:Example: method ffill or bfill
-----------------------------------------------
>>> from pyg import *; import numpy as np
>>> df = np.array([np.nan, 1., np.nan, 9, np.nan, 25])
>>> assert eq(df_fillna(df, 'ffill'), np.array([ np.nan, 1., 1., 9., 9., 25.]))
>>> assert eq(df_fillna(df, ['ffill','bfill']), np.array([ 1., 1., 1., 9., 9., 25.]))
>>> assert eq(df_fillna(df, ['ffill','bfill']), np.array([ 1., 1., 1., 9., 9., 25.]))
>>> df = np.array([np.nan, 1., np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 9, np.nan, 25])
>>> assert eq(df_fillna(df, 'ffill', limit = 2), np.array([np.nan, 1., 1., 1., np.nan, np.nan, np.nan, np.nan, 9., 9., 25.]))
df_fillna does not maintain state of latest 'prev' value: use ffill_ for that.
:Example: interpolation methods
-----------------------------------------------
>>> from pyg import *; import numpy as np
>>> df = np.array([np.nan, 1., np.nan, 9, np.nan, 25])
>>> assert eq(df_fillna(df, 'linear'), np.array([ np.nan, 1., 5., 9., 17., 25.]))
>>> assert eq(df_fillna(df, 'quadratic'), np.array([ np.nan, 1., 4., 9., 16., 25.]))
:Example: method = fnna and nona
---------------------------------------------
>>> from pyg import *; import numpy as np
>>> ts = np.array([np.nan] * 10 + [1.] * 10 + [np.nan])
>>> assert eq(df_fillna(ts, 'fnna'), np.array([1.]*10 + [np.nan]))
>>> assert eq(df_fillna(ts, 'nona'), np.array([1.]*10))
>>> assert len(df_fillna(np.array([np.nan]), 'nona')) == 0
>>> assert len(df_fillna(np.array([np.nan]), 'fnna')) == 0
:Returns:
-------
array/dataframe with nans removed/filled
"""
return _df_fillna(df, method = method, axis = axis, limit = limit)
@loop(dict, list, tuple)
def _nona(df, value = np.nan):
if np.isnan(value):
mask = np.isnan(df)
elif np.isinf(value):
mask = np.isinf(df)
else:
mask = df == value
if len(mask.shape) == 2:
mask = mask.min(axis=1) == 1
return df[~mask]
def nona(a, value = np.nan):
"""
removes rows that are entirely nan (or a specific other value)
:Parameters:
----------------
a : dataframe/ndarray
value : float, optional
value to be removed. The default is np.nan.
:Example:
----------
>>> from pyg import *
>>> a = np.array([1,np.nan,2,3])
>>> assert eq(nona(a), np.array([1,2,3]))
:Example: multiple columns
---------------------------
>>> a = np.array([[1,np.nan,2,np.nan], [np.nan, np.nan, np.nan, 3]]).T
>>> b = np.array([[1,2,np.nan], [np.nan, np.nan, 3]]).T ## 2nd row has nans across
>>> assert eq(nona(a), b)
"""
return _nona(a)
@loop(list, tuple, dict)
def _df_reindex(ts, index, method = None, limit = None):
methods = as_list(method)
if is_pd(ts):
if is_int(index):
raise ValueError('trying to reindex dataframe %s using numpy interval length %i'%(ts, index))
if len(methods) and methods[0] in ['backfill', 'bfill', 'pad', 'ffill']:
res = _nona(ts).reindex(index, method = methods[0], limit = limit)
res = _df_fillna(res, method = methods[1:], limit = limit)
else:
res = ts.reindex(index)
res = _df_fillna(res, method = method, limit = limit)
return res
elif is_arr(ts):
if isinstance(index, pd.Index):
if len(index) == len(ts):
return ts
else:
raise ValueError('trying to reindex numpy array %s using pandas index %s'%(ts, index))
elif is_int(index):
if index<len(ts):
res = ts[-index:]
elif index>len(ts):
shape = (index - len(ts),) + ts.shape[1:]
res = np.concatenate([np.full(shape, np.nan),ts])
else:
res = ts
return df_fillna(res, method = methods, limit = limit)
else:
return ts
else:
return ts
@loop(list, tuple, dict)
def _df_recolumn(ts, columns):
if columns is not None and is_df(ts) and ts.shape[1] > 1 and len(set(ts.columns)) == ts.shape[1]:
return pd.DataFrame({col: ts[col].values if col in ts.columns else np.nan for col in columns}, index = ts.index)
else:
return ts
def df_recolumn(ts, columns = None):
return _df_recolumn(ts, columns)
def np_reindex(ts, index, columns = None):
"""
pyg assumes that when working with numpy arrays representing timeseries, you:
- determine a global timestamp
- resample all timeseries to that one, and then convert to numpy.array, possibly truncating leading nans.
- do the maths you need to do
- having worked with numpy arrays, if we want to reindex them back into dataframe, use np_reindex
:Example:
-------
>>> from pyg import *
>>> ts = np.array(np.random.normal(0,1,1000))
>>> index = pd.Index(drange(-1999))
>>> np_reindex(ts, index)
:Parameters:
----------------
ts : numpy array
index : pandas.Index
columns: list/array of columns names
:Returns:
----------
pd.DataFrame/pd.Series
"""
if is_pd(index):
index = index.index
if len(index)>len(ts):
index = index[-len(ts):]
elif len(index)<len(ts):
ts = ts[-len(index):]
res = pd.Series(ts, index) if len(ts.shape)<2 else pd.DataFrame(ts, index)
if columns is not None:
if is_df(columns):
columns = columns.columns
res.columns = columns
return res
def df_reindex(ts, index = None, method = None, limit = None):
"""
A slightly more general version of df.reindex(index)
:Parameters:
----------------
ts : dataframe or numpy array (or list/dict of these)
timeseries to be reindexed
index : str, timeseries, pd.Index.
The new index
method : str, list of str, float, optional
various methods of handling nans are available. The default is None.
See df_fillna for a full list.
:Returns:
-------
timeseries/np.ndarray (or list/dict of these)
the reindexed timeseries.
:Example: index = inner/outer
-----------------------------
>>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]
>>> res = df_reindex(tss, 'inner')
>>> assert len(res[0]) == 6
>>> res = df_reindex(tss, 'outer')
>>> assert len(res[0]) == 14
:Example: index provided
-----------------------------
>>> tss = [pd.Series(np.random.normal(0,1,10), drange(-i, 9-i)) for i in range(5)]
>>> res = df_reindex(tss, tss[0])
>>> assert eq(res[0], tss[0])
>>> res = df_reindex(tss, tss[0].index)
>>> assert eq(res[0], tss[0])
"""
if index is None:
return ts
elif is_str(index):
index = df_index(ts, index)
elif is_ts(index):
index = index.index
elif is_arr(index):
index = pd.Index(index)
return _df_reindex(ts, index = index, method = method, limit = limit)
def df_concat(objs, columns = None, axis=1, join = 'outer'):
"""
simple concatenator,
- defaults to concatenating by date (for timeseries)
- supports columns renaming
:Parameters:
----------
objs : list/dict
collection of timeseries
columns : str/list
Names of new columns. The default is None.
axis : int, optional
axis to merge. The default is 1.
join : str, optional
join method inner/outer, see pd.concat. The default is 'outer'.
:Returns:
-------
res : pd.DataFrame
joined dataframe
:Example:
---------
>>> objs = [pd.Series([1,2,3], [4,5,6]), pd.Series([3,4,5], [1,2,4])]
>>> columns = ['a', 'b'];
>>> axis = 1; join = 'outer'
>>> res = df_concat(objs, columns)
>>> res
>>> a b
>>> 1 NaN 3.0
>>> 2 NaN 4.0
>>> 4 1.0 5.0
>>> 5 2.0 NaN
>>> 6 3.0 NaN
>>> df_concat(res, dict(a = 'x', b = 'y'))
>>> res
>>> x y
>>> 1 NaN 3.0
>>> 2 NaN 4.0
>>> 4 1.0 5.0
>>> 5 2.0 NaN
>>> 6 3.0 NaN
"""
if isinstance(objs, dict):
columns = list(objs.keys())
objs = list(objs.values())
if isinstance(objs, list):
df_objs = [o for o in objs if is_pd(o)]
res = pd.concat(df_objs, axis = axis, join = join)
if len(df_objs) < len(objs):
df_objs = [o if is_pd(o) else pd.Series(o, res.index) for o in objs]
res = pd.concat(df_objs, axis = axis, join = join)
elif isinstance(objs, pd.DataFrame):
res = objs.copy() if columns is not None else objs
if columns is not None:
if isinstance(columns, list):
res.columns = columns
else:
res = res.rename(columns = columns)
return res
@loop(list, dict, tuple)
def _df_column(ts, column, i = None, n = None):
"""
This is mostly a helper function to help us loop through multiple columns.
Function grabs a column from a dataframe/2d array
:Parameters:
----------
ts : datafrane
the original dataframe or 2-d numpy array
column : str
name of the column to grab.
i : int, optional
Can grab the column using its index. The default is None.
n : int, optional
asserting the number of columns, ts.shape[1]. The default is None.
:Returns:
-------
a series or a 1-d numpy array
"""
if is_df(ts):
if ts.shape[1] == 1:
return ts[ts.columns[0]]
elif column in ts.columns:
return ts[column]
elif column is None and i is not None:
if len(set(ts.columns)) == ts.shape[1]: #unique columns, don't call me using i
raise ValueError('trying to grab %ith column from a dataframe with proper columns: %s'%(i, ts.columns))
elif n is not None and ts.shape[1]!=n:
raise ValueError('trying to grab %ith column and asserting must have %i columns but have %i'%(i, n, ts.shape[1]))
else:
if i<ts.shape[1]:
return ts.iloc[:,i]
else:
return np.nan
else:
return np.nan
elif is_arr(ts) and len(ts.shape) == 2:
if ts.shape[1] == 1:
return ts.T[0]
elif i is not None:
if n is not None and ts.shape[1]!=n:
raise ValueError('trying to grab %ith column and asserting must have %i columns but have %i'%(i, n, ts.shape[1]))
elif i<ts.shape[1]:
return ts.T[i]
else:
return np.nan
else:
return ts
else:
return ts
def df_column(ts, column, i = None, n = None):
"""
This is mostly a helper function to help us loop through multiple columns.
Function grabs a column from a dataframe/2d array
:Parameters:
----------
ts : datafrane
the original dataframe or 2-d numpy array
column : str
name of the column to grab.
i : int, optional
Can grab the column using its index. The default is None.
n : int, optional
asserting the number of columns, ts.shape[1]. The default is None.
:Returns:
-------
a series or a 1-d numpy array
"""
return _df_column(ts = ts, column = column, i = i, n = n)
def _convert(res, columns):
"""
We ran the function once per column; now we convert the per-column results back into objects.
----------
res : dict
results computed per column.
"""
values = list(res.values())
if is_tss(values):
return pd.DataFrame(res)
elif is_arrs(values) and is_int(columns):
return np.array(values).T
elif is_tuples(values):
return tuple([_convert(dict(zip(res.keys(), row)), columns) for row in zipper(*values)])
else:
return np.array(values) if is_int(columns) else pd.Series(res)
def df_sync(dfs, join = 'ij', method = None, columns = 'ij'):
"""
df_sync performs a sync of multiple dataframes
:Parameters:
----------
dfs : list or dict of timeseries
dataframes to be synched
join : str, optional
index join method. The default is 'ij'.
method : str/float, optional
how the nan's are to be filled once reindexing occurs. The default is None.
columns : str, optional
how to sync multi-column timeseries. The default is 'ij'.
:Example:
-------
>>> a = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-100,-1), list('abcde'))
>>> b = pd.DataFrame(np.random.normal(0,1,(100,5)), drange(-99), list('bcdef'))
>>> c = 'not a timeseries'
>>> d = pd.DataFrame(np.random.normal(0,1,(100,1)), drange(-98,1), ['single_column_df'])
>>> s = pd.Series(np.random.normal(0,1,105), drange(-104))
:Example: inner join on index and columns
--------------------------------
>>> dfs = [a,b,c,d,s]
>>> join = 'ij'; method = None; columns = 'ij'
>>> res = df_sync(dfs, 'ij')
>>> assert len(res[0]) == len(res[1]) == len(res[-1]) == 98
>>> assert res[2] == 'not a timeseries'
>>> assert list(res[0].columns) == list('bcde')
:Example: outer join on index and inner join on columns
--------------------------------
>>> res = df_sync(dfs, join = 'oj')
>>> assert len(res[0]) == len(res[1]) == len(res[-1]) == 106; assert res[2] == 'not a timeseries'
>>> assert list(res[0].columns) == list('bcde')
>>> res = df_sync(dfs, join = 'oj', method = 1)
>>> assert res[0].iloc[0].sum() == 4
:Example: outer join on index and columns
-------------------------------------------
>>> res = df_sync(dfs, join = 'oj', method = 1, columns = 'oj')
>>> assert res[0].iloc[0].sum() == 5
>>> assert list(res[0].columns) == list('abcdef')
>>> assert list(res[-2].columns) == ['single_column_df'] # single column unaffected
:Example: synching of dict rather than a list
-------------------------------------------
>>> dfs = Dict(a = a, b = b, c = c, d = d, s = s)
>>> res = df_sync(dfs, join = 'oj', method = 1, columns = 'oj')
>>> assert res.c == 'not a timeseries'
>>> assert res.a.shape == (106,6)
"""
if isinstance(dfs, dict):
values = list(dfs.values())
elif isinstance(dfs, (list, tuple)):
values = list(dfs)
else:
return dfs
listed = _list(values)
tss = [ts for ts in listed if is_ts(ts)]
index = df_index(listed, join)
dfs = df_reindex(dfs, index, method = method)
### now we do the columns
if columns is False or columns is None:
return dfs
else:
cols = df_columns(tss, columns)
dfs = df_recolumn(dfs, cols)
return dfs
class presync(wrapper):
"""
Much of timeseries analysis in Pandas is spent aligning multiple timeseries before feeding them into a function.
presync allows easy presynching of all parameters of a function.
:Parameters:
----------
function : callable, optional
function to be presynched. The default is None.
index : str, optional
index join policy. The default is 'inner'.
method : str/int/list of these, optional
method of nan handling. The default is None.
columns : str, optional
columns join policy. The default is 'inner'.
default : float, optional
value when no data is available. The default is np.nan.
:Returns:
-------
presynch-decorated function
:Example:
-------
>>> from pyg import *
>>> x = pd.Series([1,2,3,4], drange(-3))
>>> y = pd.Series([1,2,3,4], drange(-4,-1))
>>> z = pd.DataFrame([[1,2],[3,4]], drange(-3,-2), ['a','b'])
>>> addition = lambda a, b: a+b
#We get some nonsensical results:
>>> assert list(addition(x,z).columns) == list(x.index) + ['a', 'b']
#But:
>>> assert list(presync(addition)(x,z).columns) == ['a', 'b']
>>> res = presync(addition, index='outer', method = 'ffill')(x,z)
>>> assert eq(res.a.values, np.array([2,5,6,7]))
:Example 2: alignment works for parameters 'buried' within...
-------------------------------------------------------
>>> function = lambda a, b: a['x'] + a['y'] + b
>>> f = presync(function, 'outer', method = 'ffill')
>>> res = f(dict(x = x, y = y), b = z)
>>> assert eq(res, pd.DataFrame(dict(a = [np.nan, 4, 8, 10, 11], b = [np.nan, 5, 9, 11, 12]), index = drange(-4)))
| >>> addition = lambda a, b: a+b
>>> a = presync(addition)
>>> assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([[1,2,3,4]]).T), pd.Series([2,4,6,8], drange(-3)))
>>> assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([1,2,3,4])), pd.Series([2,4,6,8], drange(-3)))
>>> assert eq(a(pd.Series([1,2,3,4], drange(-3)), np.array([[1,2,3,4],[5,6,7,8]]).T), pd.DataFrame({0:[2,4,6,8], 1:[6,8,10,12]}, drange(-3)))
>>> assert eq(a(np.array([1,2,3,4]), np.array([[1,2,3,4]]).T), np.array([2,4,6,8]))
:Example 4: inner join alignment of columns in dataframes by default
---------------------------------------------------------------------
>>> x = pd.DataFrame({'a':[2,4,6,8], 'b':[6,8,10,12.]}, drange(-3))
>>> y = pd.DataFrame({'wrong':[2,4,6,8], 'columns':[6,8,10,12]}, drange(-3))
>>> assert len(a(x,y)) == 0
>>> y = pd.DataFrame({'a':[2,4,6,8], 'other':[6,8,10,12.]}, drange(-3))
>>> assert eq(a(x,y),x[['a']]*2)
>>> y = pd.DataFrame({'a':[2,4,6,8], 'b':[6,8,10,12.]}, drange(-3))
>>> assert eq(a(x,y),x*2)
>>> y = pd.DataFrame({'column name for a single column dataframe is ignored':[1,1,1,1]}, drange(-3))
>>> assert eq(a(x,y),x+1)
>>> a = presync(addition, columns = 'outer')
>>> y = pd.DataFrame({'other':[2,4,6,8], 'a':[6,8,10,12]}, drange(-3))
>>> assert sorted(a(x,y).columns) == ['a','b','other']
    :Example 5: ffilling, bfilling
    ------------------------------
>>> x = pd.Series([1.,np.nan,3.,4.], drange(-3))
>>> y = pd.Series([1.,np.nan,3.,4.], drange(-4,-1))
>>> assert eq(a(x,y), pd.Series([np.nan, np.nan,7], drange(-3,-1)))
    However, presync provides easy conversion of its internal parameters:
>>> assert eq(a.ffill(x,y), pd.Series([2,4,7], drange(-3,-1)))
>>> assert eq(a.bfill(x,y), pd.Series([4,6,7], drange(-3,-1)))
>>> assert eq(a.oj(x,y), pd.Series([np.nan, np.nan, np.nan, 7, np.nan], drange(-4)))
>>> assert eq(a.oj.ffill(x,y), pd.Series([np.nan, 2, 4, 7, 8], drange(-4)))
    :Example 6: indexing to a specific index
    ----------------------------------------
>>> index = pd.Index([dt(-3), dt(-1)])
>>> a = presync(addition, index = index)
>>> x = pd.Series([1.,np.nan,3.,4.], drange(-3))
>>> y = pd.Series([1.,np.nan,3.,4.], drange(-4,-1))
>>> assert eq(a(x,y), pd.Series([np.nan, 7], index))
    :Example 7: returning complicated stuff
    ----------------------------------------
>>> from pyg import *
>>> a = pd.DataFrame(np.random.normal(0,1,(100,10)), drange(-99))
>>> b = pd.DataFrame(np.random.normal(0,1,(100,10)), drange(-99))
>>> def f(a, b):
>>> return (a*b, ts_sum(a), ts_sum(b))
>>> old = f(a,b)
>>> self = presync(f)
>>> args = (); kwargs = dict(a = a, b = b)
>>> new = self(*args, **kwargs)
>>> assert eq(new, old)
"""
def __init__(self, function = None, index = 'inner', method = None, columns = 'inner', default = np.nan):
super(presync, self).__init__(function = function, index = index, method = method, columns = columns , default = default)
@property
def ij(self):
return copy(self) + dict(index = 'inner')
@property
def oj(self):
return self + dict(index = 'outer')
@property
def lj(self):
return self + dict(index = 'left')
@property
def rj(self):
return self + dict(index = 'right')
@property
def ffill(self):
return copy(self) + dict(method = 'ffill')
@property
def bfill(self):
return self + dict(method = 'bfill')
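    # Each property above returns a (copied) wrapper with a single parameter
    # overridden, so they compose: presync(f).oj.ffill behaves like
    # presync(f, index = 'outer', method = 'ffill').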
def wrapped(self, *args, **kwargs):
_idx = kwargs.pop('join', self.index)
_method = kwargs.pop('method', self.method)
_columns = kwargs.pop('columns', self.columns)
values = list(args) + list(kwargs.values())
listed = _list(values)
tss = [ts for ts in listed if is_ts(ts)]
callargs = inspect.getcallargs(self.function, *args, **kwargs)
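        # If 'join' names one of the function's own parameters, that argument's
        # index becomes the master index; otherwise the index is built from all
        # timeseries arguments according to the join policy.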
if is_str(_idx) and _idx in callargs:
index = _index(callargs[_idx])
else:
index = df_index(listed, _idx)
args_= df_reindex(args, index, method = _method)
kwargs_= df_reindex(kwargs, index, method = _method)
### now we do the columns
if _columns is False:
return self.function(*args_, **kwargs_)
else:
cols = [tuple(ts.columns) for ts in tss if is_df(ts) and ts.shape[1]>1]
if len(set(cols))==1: # special case where all 2-d dataframes have same column headers
columns = cols[0]
n = len(columns)
res = {column: self.function(*df_column(args_,column = column, i = i, n = n), **df_column(kwargs_, column=column, i = i, n = n)) for i, column in enumerate(columns)}
else:
columns = df_columns(listed, _columns)
if is_int(columns):
res = {i: self.function(*df_column(args_, column = None, i = i), **df_column(kwargs_, column=None, i = i)) for i in range(columns)}
elif columns is None:
return self.function(*df_column(args_, column = None), **df_column(kwargs_, column = None))
else:
columns = list(columns) if isinstance(columns, pd.Index) else as_list(columns)
columns = sorted(columns)
res = {column: self.function(*df_column(args_,column = column), **df_column(kwargs_, column=column)) for column in columns}
converted = _convert(res, columns)
return converted
@presync
def _div_(a, b):
"""
division of a by b supporting presynching (inner join) of timeseries
"""
return a/b
@presync
def _sub_(a, b):
"""
subtraction of b from a supporting presynching (inner join) of timeseries
"""
return a-b
@presync
def _add_(a, b):
"""
addition of a and b supporting presynching (inner join) of timeseries
"""
return a + b
@presync
def _mul_(a, b):
"""
multiplication of b and a supporting presynching (inner join) of timeseries
"""
return a * b
@presync
def _pow_(a, b):
"""
equivalent to a**b supporting presynching (inner join) of timeseries
"""
return a**b
def add_(a, b = None, join = 'ij', method = None, columns = 'ij'):
"""
    addition of a and b supporting presynching (inner join) of timeseries

    >>> a = pd.Series([1,2,3], drange(-2))
    >>> b = pd.Series([1,2,3], drange(-3,-1))
    >>> add_(a, b, 'oj', method = 0)
"""
dfs = as_list(a) + as_list(b)
f = lambda a, b: _add_(a, b, join = join, method = method, columns = columns)
return reducer(f, dfs)
def mul_(a, b = None, join = 'ij', method = None, columns = 'ij'):
"""
    multiplication of a and b supporting presynching (inner join) of timeseries

    >>> mul_(a, b, join = 'oj', method = 'ffill')
    >>> cell(mul_, a = a, b = b, join = 'oj')()
"""
dfs = as_list(a) + as_list(b)
f = lambda a, b: _mul_(a, b, join = join, method = method, columns = columns)
return reducer(f, dfs)
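# A minimal sketch (made-up data): lists are reduced pairwise, so
# >>> add_([a, b, c])    # equivalent to a + b + c on the inner-joined index
# >>> mul_([a, b], join = 'oj', method = 'ffill')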
def div_(a, b, join = 'ij', method = None, columns = 'ij'):
"""
division of a by b supporting presynching (inner join) of timeseries
"""
if isinstance(a, list):
a = mul_(a, join = join, method = method, columns = columns)
if isinstance(b, list):
b = mul_(b, join = join, method = method, columns = columns)
return _div_(a, b, join = join, method = method, columns = columns)
def sub_(a, b, join = 'ij', method = None, columns = 'ij'):
"""
subtraction of b from a supporting presynching (inner join) of timeseries
"""
if isinstance(a, list):
a = add_(a, join = join, method = method, columns = columns)
if isinstance(b, list):
b = add_(b, join = join, method = method, columns = columns)
return _sub_(a, b, join = join, method = method, columns = columns)
def pow_(a, b, join = 'ij', method = None, columns = 'ij'):
"""
equivalent to a**b supporting presynching (inner join) of timeseries
"""
return _pow_(a,b, join = join, method = method, columns = columns)
def min_(a, b = None, join = 'ij', method = None, columns = 'ij'):
"""
    equivalent to a reduced np.minimum operation, supporting presynching of timeseries
"""
dfs = as_list(a) + as_list(b)
dfs = df_sync(dfs, join = join, method = method, columns = columns)
return reducer(np.minimum, dfs)
def max_(a, b = None, join = 'ij', method = None, columns = 'ij'):
"""
    equivalent to a reduced np.maximum operation, supporting presynching of timeseries
"""
dfs = as_list(a) + as_list(b)
dfs = df_sync(dfs, join = join, method = method, columns = columns)
return reducer(np.maximum, dfs)
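# A minimal sketch (made-up data) of the reduced elementwise operations:
# >>> a = pd.Series([1, 5, 3], drange(-2))
# >>> b = pd.Series([4, 2, 6], drange(-2))
# >>> min_(a, b)    # 1, 2, 3 on the inner-joined index
# >>> max_(a, b)    # 4, 5, 6 on the inner-joined index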
def _closed(oc):
if oc in '()oO':
return False
elif oc in '[]cC':
return True
else:
raise ValueError('not sure how to parse boundary %s'%oc)
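# A quick illustration of the boundary flags: _closed('[') is True and
# _closed('(') is False, so openclose = '[)' includes the lower bound and
# excludes the upper bound.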
def _df_slice(df, lb = None, ub = None, openclose = '[)'):
"""
    Performs a one-time slice of the dataframe. Does not stitch slices together.
    pandas slicing has two issues:
    1) it fails for timeseries quite a bit
    2) for timeseries, df[dt1:dt2] is closed-closed while for a normal dataframe, df[lb:ub] is closed-open
"""
if isinstance(df, (pd.Index, pd.Series, pd.DataFrame)) and len(df)>0 and (ub is not None or lb is not None):
l,u = openclose if openclose else '[)'
l = _closed(l); u = _closed(u)
if is_ts(df):
lb = lb if lb is None or isinstance(lb, datetime.time) else dt(lb)
ub = ub if ub is None or isinstance(ub, datetime.time) else dt(ub)
if (l or lb is None) and (u or ub is None):
try:
return df[lb:ub]
except Exception:
pass
elif (l or lb is None) and (ub is None or not u):
try:
return df[lb:ub]
except Exception:
pass
if lb is not None:
index = df if isinstance(df, pd.Index) else df.index
if isinstance(lb, datetime.time):
index = index.time
df = df[index>=lb] if l else df[index>lb]
if ub is not None:
index = df if isinstance(df, pd.Index) else df.index
if isinstance(ub, datetime.time):
index = index.time
df = df[index<=ub] if u else df[index<ub]
return df
def df_slice(df, lb = None, ub = None, openclose = '(]', n = 1):
"""
slices a dataframe/series/index based on lower/upper bounds.
If multiple timeseries are sliced at different times, will then stitch them together.
:Parameters:
----------
df : dataframe
Either a single dataframe or a list of dataframes.
lb : single or multiple lower bounds
lower bounds to cut the data.
ub : single or multiple upper bounds
upper bounds to cut the data
openclose : 2-character string
defines how left/right boundary behave.
[,] or c : close
(,) or o : open
' ' : do not cut
:Returns:
-------
    filtered (and possibly stitched) timeseries
:Example: single timeseries filtering
---------
>>> df = pd.Series(np.random.normal(0,1,1000), drange(-999))
>>> df_slice(df, None, '-1m')
>>> df_slice(df, '-1m', None)
:Example: single timeseries, multiple filtering
---------
>>> df = pd.Series(np.random.normal(0,1,1000), drange(-999))
>>> lb = jan1 = drange(2018, None, '1y')
>>> ub = feb1 = drange(dt(2018,2,1), None, '1y')
>>> assert set(df_slice(df, jan1, feb1).index.month) == {1}
:Example: single timeseries time of day filtering
---------
>>> dates = drange(-5, 0, '5n')
>>> df = pd.Series(np.random.normal(0,1,12*24*5+1), dates)
>>> assert len(df_slice(df, None, datetime.time(hour = 10))) == 606
>>> assert len(df_slice(df, datetime.time(hour = 5), datetime.time(hour = 10))) == 300
>>> assert len(df_slice(df, lb = datetime.time(hour = 10), ub = datetime.time(hour = 5))) == len(dates) - 300
:Example: stitching together multiple future contracts for a continuous price
---------
>>> ub = drange(1980, 2000, '3m')
>>> df = [pd.Series(np.random.normal(0,1,1000), drange(-999, date)) for date in ub]
>>> df_slice(df, ub = ub)
:Example: stitching together multiple future contracts for a continuous price in front 5 contracts
---------
>>> ub = drange(1980, 2000, '3m')
>>> df = [pd.Series(np.random.normal(0,1,1000), drange(-999, date)) for date in ub]
>>> df_slice(df, ub = ub, n = 5).iloc[500:]
:Example: stitching together symbols
---------
>>> from pyg import *
>>> ub = drange(1980, 2000, '3m')
>>> df = loop(list)(dt2str)(ub)
>>> df_slice(df, ub = ub, n = 3)
"""
if isinstance(lb, tuple) and len(lb) == 2 and ub is None:
lb, ub = lb
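    # A time-of-day window that wraps midnight (e.g. lb = 22:00, ub = 05:00)
    # is handled as the union of the slice up to ub and the slice from lb.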
if isinstance(ub, datetime.time) and isinstance(lb, datetime.time) and lb>ub:
pre = df_slice(df, None, ub)
post = df_slice(df, lb, None)
return pd.concat([pre, post]).sort_index()
if isinstance(df, list):
if isinstance(lb, list) and ub is None:
ub = lb[1:] + [None]
elif isinstance(ub, list) and lb is None:
lb = [None] + ub[:-1]
boundaries = sorted(set([date for date in lb + ub if date is not None]))
df = [d if is_pd(d) else pd.Series(d, boundaries) for d in df]
if n > 1:
df = [pd.concat(df[i: i+n], axis = 1) for i in range(len(df))]
for d in df:
d.columns = range(d.shape[1])
dfs = as_list(df)
dlu = zipper(dfs, lb, ub)
res = [_df_slice(d, lb = l, ub = u, openclose = openclose) for d, l, u in dlu]
if len(res) == 0:
return None
elif len(res) == 1:
return res[0]
elif isinstance(lb, list) and isinstance(ub, list):
res = pd.concat(res)
return res
def df_unslice(df, ub):
"""
    If we have a rolled multi-column timeseries, we may want to know which original timeseries each value came from.
    As long as you provide the stitching points, forming the upper bound of each original timeseries,
    df_unslice will return a dict from each upper bound to a single-column timeseries.
:Example:
---------
>>> ub = drange(1980, 2000, '3m')
>>> dfs = [pd.Series(date.year * 100 + date.month, drange(-999, date)) for date in ub]
>>> df = df_slice(dfs, ub = ub, n = 10)
>>> df.iloc[700:-700:]
>>> 0 1 2 3 4 5 6 7 8 9
>>> 1979-03-08 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN
>>> 1979-03-09 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN
>>> 1979-03-10 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN
>>> 1979-03-11 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN
>>> 1979-03-12 198001.0 198004.0 198007.0 198010.0 198101.0 198104.0 198107.0 198110.0 NaN NaN
>>> ... ... ... ... ... ... ... ... .. ..
>>> 1998-01-27 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN
>>> 1998-01-28 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN
>>> 1998-01-29 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN
>>> 1998-01-30 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN
>>> 1998-01-31 199804.0 199807.0 199810.0 199901.0 199904.0 199907.0 199910.0 200001.0 NaN NaN
>>> res = df_unslice(df, ub)
>>> res[ub[0]]
>>> 1977-04-07 198001.0
>>> 1977-04-08 198001.0
>>> 1977-04-09 198001.0
>>> 1977-04-10 198001.0
>>> 1977-04-11 198001.0
>>> ...
>>> 1979-12-28 198001.0
>>> 1979-12-29 198001.0
>>> 1979-12-30 198001.0
>>> 1979-12-31 198001.0
>>> 1980-01-01 198001.0
>>> Name: 0, Length: 1000, dtype: float64
We can then even slice the data again:
>>> assert eq(df_slice(list(res.values()), ub = ub, n = 10), df)
"""
n = df.shape[1] if is_df(df) else 1
res = dictable(ub = ub, lb = [None] + ub[:-1], i = range(len(ub)))
res = res(ts = lambda lb, ub: df_slice(df, lb, ub, '(]'))
res = res(rs = lambda i, ts: dictable(u = ub[i: i+n], j = range(len(ub[i: i+n])))(ts = lambda j: ts[j]))
rs = dictable.concat(res.rs).listby('u').do([pd.concat, nona], 'ts')
return dict(rs['u', 'ts']) | :Example 3: alignment of numpy arrays
------------------------------------- |
plan_insert_into.rs | // Copyright 2021 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use common_datablocks::DataBlock;
use common_datavalues::DataSchemaRef;
use common_meta_types::MetaId;
use crate::PlanNode;
#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub enum InsertInputSource {
SelectPlan(Box<PlanNode>),
StreamingWithFormat(String),
Values(InsertValueBlock),
}
#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub struct InsertValueBlock {
#[serde(skip)]
pub block: DataBlock,
}
#[derive(serde::Serialize, serde::Deserialize, Clone)]
pub struct InsertPlan {
pub database_name: String,
pub table_name: String,
pub table_id: MetaId,
pub schema: DataSchemaRef,
pub overwrite: bool,
pub source: InsertInputSource,
}
impl PartialEq for InsertPlan {
fn eq(&self, other: &Self) -> bool {
self.database_name == other.database_name
&& self.table_name == other.table_name
&& self.schema == other.schema
}
}
impl InsertPlan {
pub fn schema(&self) -> DataSchemaRef {
self.schema.clone()
}
pub fn has_select_plan(&self) -> bool {
matches!(&self.source, InsertInputSource::SelectPlan(_))
}
} | // distributed under the License is distributed on an "AS IS" BASIS, |
test_cli_mgmt_sql_managed_instance.py | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# Current Operation Coverage:
# ManagedInstances: 6/8
# ManagedInstanceOperations: 1/3
import unittest
import azure.mgmt.sql
from azure.core.exceptions import HttpResponseError
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, ResourceGroupPreparer, recorded_by_proxy
AZURE_LOCATION = 'eastus'
class MgmtSqlTest(AzureMgmtRecordedTestCase):
def setup_method(self, method):
self.client = self.create_mgmt_client(
azure.mgmt.sql.SqlManagementClient | )
# self.mgmt_client180601 = self.create_mgmt_client(
# azure.mgmt.sql.SqlManagementClient,
# api_version="2018-06-01-preview"
# )
# if self.is_live:
# from azure.mgmt.network import NetworkManagementClient
# self.network_client = self.create_mgmt_client(
# NetworkManagementClient
# )
def create_virtual_network(self, group_name, location, security_group_name, route_table_name, network_name, subnet_name):
# Create network security group
network_security_group = self.network_client.network_security_groups.begin_create_or_update(
group_name,
security_group_name,
{
"location": location
}
).result()
        # Create security rule: allow TDS (port 1433) inbound
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"allow_tds_inbound",
{
"protocol": "Tcp",
"access": "Allow",
"direction": "Inbound",
"source_port_range": "*",
"source_address_prefix": "10.0.0.0/16",
"destination_address_prefix": "*",
"destination_port_range": "1433",
"priority": "1000"
}
).result()
        # Create security rule: allow redirect ports (11000-11999) inbound
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"allow_redirect_inbound",
{
"protocol": "Tcp",
"access": "Allow",
"direction": "Inbound",
"source_port_range": "*",
"source_address_prefix": "10.0.0.0/16",
"destination_address_prefix": "*",
"destination_port_range": "11000-11999",
"priority": "1100"
}
).result()
        # Create security rule: deny all other inbound
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"deny_all_inbound",
{
"protocol": "*",
"access": "Deny",
"direction": "Inbound",
"source_port_range": "*",
"source_address_prefix": "*",
"destination_address_prefix": "*",
"destination_port_range": "*",
"priority": "4096"
}
).result()
        # Create security rule: deny all outbound
security_rule = self.network_client.security_rules.begin_create_or_update(
group_name,
security_group_name,
"deny_all_outbound",
{
"protocol": "*",
"access": "Deny",
"direction": "Outbound",
"source_port_range": "*",
"source_address_prefix": "*",
"destination_address_prefix": "*",
"destination_port_range": "*",
"priority": "4095"
}
).result()
# Create route table
route_table = self.network_client.route_tables.begin_create_or_update(
group_name,
route_table_name,
{
"location": location
}
).result()
# create virtual network
azure_operation_poller = self.network_client.virtual_networks.begin_create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = azure_operation_poller.result()
# create subnet
async_subnet_creation = self.network_client.subnets.begin_create_or_update(
group_name,
network_name,
subnet_name,
{
'address_prefix': '10.0.0.0/24',
'network_security_group': network_security_group,
'route_table': route_table,
'delegations': [
{
"service_name": "Microsoft.Sql/managedInstances",
"name": "dgManagedInstancexxx"
}
]
}
)
subnet_info = async_subnet_creation.result()
return subnet_info
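    # Note: the subnet above is delegated to Microsoft.Sql/managedInstances,
    # which Azure requires before a managed instance can be deployed into it.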
@recorded_by_proxy
def test_instance_operation(self):
RESOURCE_GROUP = "testManagedInstance"
MANAGED_INSTANCE_NAME = "testinstancexxy"
#--------------------------------------------------------------------------
# /ManagedInstanceOperations/get/List the managed instance management operations[get]
#--------------------------------------------------------------------------
# result = self.client.managed_instance_operations.list_by_managed_instance(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME)
result = self.client.managed_instance_operations.list()
page_result = [item for item in result]
#--------------------------------------------------------------------------
# /ManagedInstanceOperations/get/Gets the managed instance management operation[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instance_operations.get(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, operation_id=OPERATION_ID)
#--------------------------------------------------------------------------
# /ManagedInstanceOperations/post/Cancel the managed instance management operation[post]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instance_operations.cancel(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, operation_id=OPERATION_ID)
@unittest.skip("it will take a long time.")
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_managed_instances(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
RESOURCE_GROUP = resource_group.name
VIRTUAL_NETWORK_NAME = "myVirtualNetwork"
SUBNET_NAME = "mysubnet"
NETWORK_SECURITY_GROUP = "mynetworksecuritygroup"
ROUTE_TABLE = "myroutetable"
MANAGED_INSTANCE_NAME = "mymanagedinstancexpnvcxxvx"
INSTANCE_POOL_NAME = "myinstancepool"
if self.is_live:
self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, NETWORK_SECURITY_GROUP, ROUTE_TABLE, VIRTUAL_NETWORK_NAME, SUBNET_NAME)
#--------------------------------------------------------------------------
# /ManagedInstances/put/Create managed instance with minimal properties[put]
#--------------------------------------------------------------------------
BODY = {
"sku": {
# "name": "BC_Gen5",
# "tier": "GeneralPurpose"
"name": "MIGP8G4",
"tier": "GeneralPurpose",
"family": "Gen5"
},
"location": "westeurope",
"administrator_login": "dummylogin",
"administrator_login_password": "Un53cuRE!",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME,
"storage_account_type": "GRS",
# "v_cores": "8",
# "storage_size_in_gb": "128",
# "collection": "Serbian_Cyrillic_100_CS_AS",
# "public_data_endpoint_enabled": True,
# "proxy_override": "Proxy",
# "timezone_id": "Central European Standard Time",
# "minimal_tls_version": "1.2",
# "license_type": "LicenseIncluded"
}
        result = self.client.managed_instances.begin_create_or_update(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, parameters=BODY)
        # [Kaihui] creation takes about 6 hours to complete, so we don't wait for the result here.
        # result = result.result()
#--------------------------------------------------------------------------
# /ManagedInstances/get/List managed instances by instance pool[get]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.list_by_instance_pool(resource_group_name=RESOURCE_GROUP, instance_pool_name=INSTANCE_POOL_NAME)
#--------------------------------------------------------------------------
# /ManagedInstances/get/Get managed instance[get]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instances.get(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME)
#--------------------------------------------------------------------------
# /ManagedInstances/get/List managed instances by resource group[get]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
#--------------------------------------------------------------------------
# /ManagedInstances/get/List managed instances[get]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.list()
#--------------------------------------------------------------------------
# /ManagedInstances/post/Failover a managed instance.[post]
#--------------------------------------------------------------------------
# result = self.mgmt_client.managed_instances.begin_failover(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, replica_type="Primary")
# result = result.result()
# #--------------------------------------------------------------------------
# # /ManagedInstances/patch/Update managed instance with minimal properties[patch]
# #--------------------------------------------------------------------------
# BODY = {
# "administrator_login": "dummylogin",
# "administrator_login_password": "Un53cuRE!",
# "subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME,
# "v_cores": "8",
# "storage_size_in_gb": "128",
# "collection": "Serbian_Cyrillic_100_CS_AS",
# "public_data_endpoint_enabled": True,
# "proxy_override": "Proxy",
# "timezone_id": "Central European Standard Time",
# "minimal_tls_version": "1.2"
# }
# result = self.mgmt_client.managed_instances.begin_update(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME, parameters=BODY)
# result = result.result()
#--------------------------------------------------------------------------
# /ManagedInstances/delete/Delete managed instance[delete]
#--------------------------------------------------------------------------
        result = self.client.managed_instances.begin_delete(resource_group_name=RESOURCE_GROUP, managed_instance_name=MANAGED_INSTANCE_NAME)
result = result.result() | |
__init__.py | """Support for Hass.io."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.homeassistant import SERVICE_CHECK_CONFIG
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_NAME,
EVENT_CORE_CONFIG_UPDATE,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
)
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .addon_panel import async_setup_addon_panel
from .auth import async_setup_auth_view
from .discovery import async_setup_discovery_view
from .handler import HassIO, HassioAPIError
from .http import HassIOView
from .ingress import async_setup_ingress_view
_LOGGER = logging.getLogger(__name__)
DOMAIN = "hassio"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_FRONTEND_REPO = "development_repo"
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})},
extra=vol.ALLOW_EXTRA,
)
DATA_HOMEASSISTANT_VERSION = "hassio_hass_version"
HASSIO_UPDATE_INTERVAL = timedelta(minutes=55)
SERVICE_ADDON_START = "addon_start"
SERVICE_ADDON_STOP = "addon_stop"
SERVICE_ADDON_RESTART = "addon_restart"
SERVICE_ADDON_STDIN = "addon_stdin"
SERVICE_HOST_SHUTDOWN = "host_shutdown"
SERVICE_HOST_REBOOT = "host_reboot"
SERVICE_SNAPSHOT_FULL = "snapshot_full"
SERVICE_SNAPSHOT_PARTIAL = "snapshot_partial"
SERVICE_RESTORE_FULL = "restore_full"
SERVICE_RESTORE_PARTIAL = "restore_partial"
ATTR_ADDON = "addon"
ATTR_INPUT = "input"
ATTR_SNAPSHOT = "snapshot"
ATTR_ADDONS = "addons"
ATTR_FOLDERS = "folders"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_PASSWORD = "password"
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.slug})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend(
{vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)}
)
SCHEMA_SNAPSHOT_FULL = vol.Schema(
{vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend(
{
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
SCHEMA_RESTORE_FULL = vol.Schema(
{vol.Required(ATTR_SNAPSHOT): cv.slug, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
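# Each entry maps a service name to a tuple of
# (Hass.io API path template, request schema, timeout in seconds,
#  whether the service payload is forwarded to the API call).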
MAP_SERVICE_API = {
SERVICE_ADDON_START: ("/addons/{addon}/start", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STOP: ("/addons/{addon}/stop", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_RESTART: ("/addons/{addon}/restart", SCHEMA_ADDON, 60, False),
SERVICE_ADDON_STDIN: ("/addons/{addon}/stdin", SCHEMA_ADDON_STDIN, 60, False),
SERVICE_HOST_SHUTDOWN: ("/host/shutdown", SCHEMA_NO_DATA, 60, False),
SERVICE_HOST_REBOOT: ("/host/reboot", SCHEMA_NO_DATA, 60, False),
SERVICE_SNAPSHOT_FULL: ("/snapshots/new/full", SCHEMA_SNAPSHOT_FULL, 300, True),
SERVICE_SNAPSHOT_PARTIAL: (
"/snapshots/new/partial",
SCHEMA_SNAPSHOT_PARTIAL,
300,
True,
),
SERVICE_RESTORE_FULL: (
"/snapshots/{snapshot}/restore/full",
SCHEMA_RESTORE_FULL,
300,
True,
),
SERVICE_RESTORE_PARTIAL: (
"/snapshots/{snapshot}/restore/partial",
SCHEMA_RESTORE_PARTIAL,
300,
True,
),
}
@callback
@bind_hass
def get_homeassistant_version(hass):
|
@callback
@bind_hass
def is_hassio(hass):
"""Return true if Hass.io is loaded.
Async friendly.
"""
return DOMAIN in hass.config.components
async def async_setup(hass, config):
"""Set up the Hass.io component."""
# Check local setup
for env in ("HASSIO", "HASSIO_TOKEN"):
if os.environ.get(env):
continue
_LOGGER.error("Missing %s environment variable.", env)
return False
host = os.environ["HASSIO"]
websession = hass.helpers.aiohttp_client.async_get_clientsession()
hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host)
if not await hassio.is_connected():
    _LOGGER.warning("Not connected with Hass.io / system too busy!")
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
data = await store.async_load()
if data is None:
data = {}
refresh_token = None
if "hassio_user" in data:
user = await hass.auth.async_get_user(data["hassio_user"])
if user and user.refresh_tokens:
refresh_token = list(user.refresh_tokens.values())[0]
# Migrate old Hass.io users to be admin.
if not user.is_admin:
await hass.auth.async_update_user(user, group_ids=[GROUP_ID_ADMIN])
if refresh_token is None:
user = await hass.auth.async_create_system_user("Hass.io", [GROUP_ID_ADMIN])
refresh_token = await hass.auth.async_create_refresh_token(user)
data["hassio_user"] = user.id
await store.async_save(data)
# This overrides the normal API call that would be forwarded
development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO)
if development_repo is not None:
hass.http.register_static_path(
"/api/hassio/app", os.path.join(development_repo, "hassio/build"), False
)
hass.http.register_view(HassIOView(host, websession))
if "frontend" in hass.config.components:
await hass.components.panel_custom.async_register_panel(
frontend_url_path="hassio",
webcomponent_name="hassio-main",
sidebar_title="Hass.io",
sidebar_icon="hass:home-assistant",
js_url="/api/hassio/app/entrypoint.js",
embed_iframe=True,
require_admin=True,
)
await hassio.update_hass_api(config.get("http", {}), refresh_token)
async def push_config(_):
"""Push core config to Hass.io."""
await hassio.update_hass_timezone(str(hass.config.time_zone))
hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, push_config)
await push_config(None)
async def async_service_handler(service):
"""Handle service calls for Hass.io."""
api_command = MAP_SERVICE_API[service.service][0]
data = service.data.copy()
addon = data.pop(ATTR_ADDON, None)
snapshot = data.pop(ATTR_SNAPSHOT, None)
payload = None
# Pass data to Hass.io API
if service.service == SERVICE_ADDON_STDIN:
payload = data[ATTR_INPUT]
elif MAP_SERVICE_API[service.service][3]:
payload = data
# Call API
try:
await hassio.send_command(
api_command.format(addon=addon, snapshot=snapshot),
payload=payload,
timeout=MAP_SERVICE_API[service.service][2],
)
except HassioAPIError as err:
_LOGGER.error("Error on Hass.io API: %s", err)
for service, settings in MAP_SERVICE_API.items():
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=settings[1]
)
async def update_homeassistant_version(now):
"""Update last available Home Assistant version."""
try:
data = await hassio.get_homeassistant_info()
hass.data[DATA_HOMEASSISTANT_VERSION] = data["last_version"]
except HassioAPIError as err:
_LOGGER.warning("Can't read last version: %s", err)
hass.helpers.event.async_track_point_in_utc_time(
update_homeassistant_version, utcnow() + HASSIO_UPDATE_INTERVAL
)
# Fetch last version
await update_homeassistant_version(None)
async def async_handle_core_service(call):
"""Service handler for handling core services."""
if call.service == SERVICE_HOMEASSISTANT_STOP:
await hassio.stop_homeassistant()
return
try:
errors = await conf_util.async_check_ha_config_file(hass)
except HomeAssistantError:
return
if errors:
_LOGGER.error(errors)
hass.components.persistent_notification.async_create(
"Config error. See [the logs](/developer-tools/logs) for details.",
"Config validating",
f"{HASS_DOMAIN}.check_config",
)
return
if call.service == SERVICE_HOMEASSISTANT_RESTART:
await hassio.restart_homeassistant()
# Mock core services
for service in (
SERVICE_HOMEASSISTANT_STOP,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_CHECK_CONFIG,
):
hass.services.async_register(HASS_DOMAIN, service, async_handle_core_service)
# Init discovery Hass.io feature
async_setup_discovery_view(hass, hassio)
# Init auth Hass.io feature
async_setup_auth_view(hass, user)
# Init ingress Hass.io feature
async_setup_ingress_view(hass, host)
# Init add-on ingress panels
await async_setup_addon_panel(hass, hassio)
return True
| """Return latest available Home Assistant version.
Async friendly.
"""
return hass.data.get(DATA_HOMEASSISTANT_VERSION) |
test_logging.py | # Copyright 2001-2017 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2017 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import configparser
import datetime
import pathlib
import pickle
import io
import gc
import json
import os
import queue
import random
import re
import socket
import struct
import sys
import tempfile
from test.support.script_helper import assert_python_ok
from test import support
import textwrap
import threading
import time | import warnings
import weakref
import asyncore
from http.server import HTTPServer, BaseHTTPRequestHandler
import smtpd
from urllib.parse import urlparse, parse_qs
from socketserver import (ThreadingUDPServer, DatagramRequestHandler,
ThreadingTCPServer, StreamRequestHandler)
try:
import win32evtlog, win32evtlogutil, pywintypes
except ImportError:
win32evtlog = win32evtlogutil = pywintypes = None
try:
import zlib
except ImportError:
pass
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
self._threading_key = support.threading_setup()
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = saved_loggers = logger_dict.copy()
self.saved_name_to_level = logging._nameToLevel.copy()
self.saved_level_to_name = logging._levelToName.copy()
self.logger_states = logger_states = {}
for name in saved_loggers:
logger_states[name] = getattr(saved_loggers[name],
'disabled', None)
finally:
logging._releaseLock()
# Set two unused loggers
self.logger1 = logging.getLogger("\xab\xd7\xbb")
self.logger2 = logging.getLogger("\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
if self.logger1.hasHandlers():
hlist = self.logger1.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
if self.logger2.hasHandlers():
hlist = self.logger2.handlers + self.root_logger.handlers
raise AssertionError('Unexpected handlers: %s' % hlist)
self.root_logger.addHandler(self.root_hdlr)
self.assertTrue(self.logger1.hasHandlers())
self.assertTrue(self.logger2.hasHandlers())
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelToName.clear()
logging._levelToName.update(self.saved_level_to_name)
logging._nameToLevel.clear()
logging._nameToLevel.update(self.saved_name_to_level)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
manager = logging.getLogger().manager
manager.disable = 0
loggerDict = manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
logger_states = self.logger_states
for name in self.logger_states:
if logger_states[name] is not None:
self.saved_loggers[name].disabled = logger_states[name]
finally:
logging._releaseLock()
self.doCleanups()
support.threading_cleanup(*self._threading_key)
def assert_log_lines(self, expected_values, stream=None, pat=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(pat or self.expected_log_pat)
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
# Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warning(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warning(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warning(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warning(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
# Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warning(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warning(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_regression_22386(self):
"""See issue #22386 for more information."""
self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')
def test_issue27935(self):
fatal = logging.getLevelName('FATAL')
self.assertEqual(fatal, logging.FATAL)
def test_regression_29220(self):
"""See issue #29220 for more information."""
logging.addLevelName(logging.INFO, '')
self.addCleanup(logging.addLevelName, logging.INFO, 'INFO')
self.assertEqual(logging.getLevelName(logging.INFO), '')
self.assertEqual(logging.getLevelName(logging.NOTSET), 'NOTSET')
self.assertEqual(logging.getLevelName('NOTSET'), logging.NOTSET)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
def test_callable_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
def filterfunc(record):
parts = record.name.split('.')
prefix = '.'.join(parts[:2])
return prefix == 'spam.eggs'
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filterfunc)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filterfunc)
def test_empty_filter(self):
f = logging.Filter()
r = logging.makeLogRecord({'name': 'spam.eggs'})
self.assertTrue(f.filter(r))
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class HandlerTest(BaseTest):
def test_name(self):
h = logging.Handler()
h.name = 'generic'
self.assertEqual(h.name, 'generic')
h.name = 'anothergeneric'
self.assertEqual(h.name, 'anothergeneric')
self.assertRaises(NotImplementedError, h.emit, None)
def test_builtin_handlers(self):
# We can't actually *use* too many handlers in the tests,
# but we can try instantiating them with various options
if sys.platform in ('linux', 'darwin'):
for existing in (True, False):
fd, fn = tempfile.mkstemp()
os.close(fd)
if not existing:
os.unlink(fn)
h = logging.handlers.WatchedFileHandler(fn, delay=True)
if existing:
dev, ino = h.dev, h.ino
self.assertEqual(dev, -1)
self.assertEqual(ino, -1)
r = logging.makeLogRecord({'msg': 'Test'})
h.handle(r)
# Now remove the file.
os.unlink(fn)
self.assertFalse(os.path.exists(fn))
# The next call should recreate the file.
h.handle(r)
self.assertTrue(os.path.exists(fn))
else:
self.assertEqual(h.dev, -1)
self.assertEqual(h.ino, -1)
h.close()
if existing:
os.unlink(fn)
if sys.platform == 'darwin':
sockname = '/var/run/syslog'
else:
sockname = '/dev/log'
try:
h = logging.handlers.SysLogHandler(sockname)
self.assertEqual(h.facility, h.LOG_USER)
self.assertTrue(h.unixsocket)
h.close()
except OSError: # syslogd might not be available
pass
for method in ('GET', 'POST', 'PUT'):
if method == 'PUT':
self.assertRaises(ValueError, logging.handlers.HTTPHandler,
'localhost', '/log', method)
else:
h = logging.handlers.HTTPHandler('localhost', '/log', method)
h.close()
h = logging.handlers.BufferingHandler(0)
r = logging.makeLogRecord({})
self.assertTrue(h.shouldFlush(r))
h.close()
h = logging.handlers.BufferingHandler(1)
self.assertFalse(h.shouldFlush(r))
h.close()
def test_path_objects(self):
"""
Test that Path objects are accepted as filename arguments to handlers.
See Issue #27493.
"""
fd, fn = tempfile.mkstemp()
os.close(fd)
os.unlink(fn)
pfn = pathlib.Path(fn)
cases = (
(logging.FileHandler, (pfn, 'w')),
(logging.handlers.RotatingFileHandler, (pfn, 'a')),
(logging.handlers.TimedRotatingFileHandler, (pfn, 'h')),
)
if sys.platform in ('linux', 'darwin'):
cases += ((logging.handlers.WatchedFileHandler, (pfn, 'w')),)
for cls, args in cases:
h = cls(*args)
self.assertTrue(os.path.exists(fn))
h.close()
os.unlink(fn)
@unittest.skipIf(os.name == 'nt', 'WatchedFileHandler not appropriate for Windows.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
self.deletion_time = time.time()
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
self.handle_time = None
self.deletion_time = None
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
try:
self.handle_time = time.time()
h.handle(r)
except Exception:
print('Deleted at %s, '
'opened at %s' % (self.deletion_time,
self.handle_time))
raise
finally:
remover.join()
h.close()
if os.path.exists(fn):
os.unlink(fn)
class BadStream(object):
def write(self, data):
raise RuntimeError('deliberate mistake')
class TestStreamHandler(logging.StreamHandler):
def handleError(self, record):
self.error_record = record
class StreamHandlerTest(BaseTest):
def test_error_handling(self):
h = TestStreamHandler(BadStream())
r = logging.makeLogRecord({})
old_raise = logging.raiseExceptions
try:
h.handle(r)
self.assertIs(h.error_record, r)
h = logging.StreamHandler(BadStream())
with support.captured_stderr() as stderr:
h.handle(r)
msg = '\nRuntimeError: deliberate mistake\n'
self.assertIn(msg, stderr.getvalue())
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
h.handle(r)
self.assertEqual('', stderr.getvalue())
finally:
logging.raiseExceptions = old_raise
def test_stream_setting(self):
"""
Test setting the handler's stream
"""
h = logging.StreamHandler()
stream = io.StringIO()
old = h.setStream(stream)
self.assertIs(old, sys.stderr)
actual = h.setStream(old)
self.assertIs(actual, stream)
# test that setting to existing value returns None
actual = h.setStream(old)
self.assertIsNone(actual)
# -- The following section could be moved into a server_helper.py module
# -- if it proves to be of wider utility than just test_logging
class TestSMTPServer(smtpd.SMTPServer):
"""
This class implements a test SMTP server.
:param addr: A (host, port) tuple which the server listens on.
You can specify a port value of zero: the server's
*port* attribute will hold the actual port number
used, which can be used in client connections.
:param handler: A callable which will be called to process
incoming messages. The handler will be passed
the client address tuple, who the message is from,
a list of recipients and the message data.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
:param sockmap: A dictionary which will be used to hold
:class:`asyncore.dispatcher` instances used by
:func:`asyncore.loop`. This avoids changing the
:mod:`asyncore` module's global state.
"""
def __init__(self, addr, handler, poll_interval, sockmap):
smtpd.SMTPServer.__init__(self, addr, None, map=sockmap,
decode_data=True)
self.port = self.socket.getsockname()[1]
self._handler = handler
self._thread = None
self.poll_interval = poll_interval
def process_message(self, peer, mailfrom, rcpttos, data):
"""
Delegates to the handler passed in to the server's constructor.
Typically, this will be a test case method.
:param peer: The client (host, port) tuple.
:param mailfrom: The address of the sender.
:param rcpttos: The addresses of the recipients.
:param data: The message.
"""
self._handler(peer, mailfrom, rcpttos, data)
def start(self):
"""
Start the server running on a separate daemon thread.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the :mod:`asyncore` loop until normal termination
conditions arise.
:param poll_interval: The interval, in seconds, used in the underlying
:func:`select` or :func:`poll` call by
:func:`asyncore.loop`.
"""
asyncore.loop(poll_interval, map=self._map)
def stop(self, timeout=None):
"""
Stop the thread by closing the server instance.
Wait for the server thread to terminate.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.close()
support.join_thread(self._thread, timeout)
self._thread = None
asyncore.close_all(map=self._map, ignore_all=True)
class ControlMixin(object):
"""
This mixin is used to start a server on a separate thread, and
shut it down programmatically. Request handling is simplified - instead
of needing to derive a suitable RequestHandler subclass, you just
provide a callable which will be passed each received request to be
processed.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request. This handler is called on the
server thread, effectively meaning that requests are
processed serially. While not quite Web scale ;-),
this should be fine for testing applications.
:param poll_interval: The polling interval in seconds.
"""
def __init__(self, handler, poll_interval):
self._thread = None
self.poll_interval = poll_interval
self._handler = handler
self.ready = threading.Event()
def start(self):
"""
Create a daemon thread to run the server, and start it.
"""
self._thread = t = threading.Thread(target=self.serve_forever,
args=(self.poll_interval,))
        t.daemon = True
t.start()
def serve_forever(self, poll_interval):
"""
Run the server. Set the ready flag before entering the
service loop.
"""
self.ready.set()
super(ControlMixin, self).serve_forever(poll_interval)
def stop(self, timeout=None):
"""
Tell the server thread to stop, and wait for it to do so.
:param timeout: How long to wait for the server thread
to terminate.
"""
self.shutdown()
if self._thread is not None:
support.join_thread(self._thread, timeout)
self._thread = None
self.server_close()
self.ready.clear()
class TestHTTPServer(ControlMixin, HTTPServer):
"""
An HTTP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval in seconds.
:param log: Pass ``True`` to enable log messages.
"""
def __init__(self, addr, handler, poll_interval=0.5,
log=False, sslctx=None):
class DelegatingHTTPRequestHandler(BaseHTTPRequestHandler):
def __getattr__(self, name, default=None):
if name.startswith('do_'):
return self.process_request
raise AttributeError(name)
def process_request(self):
self.server._handler(self)
def log_message(self, format, *args):
if log:
super(DelegatingHTTPRequestHandler,
self).log_message(format, *args)
HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler)
ControlMixin.__init__(self, handler, poll_interval)
self.sslctx = sslctx
def get_request(self):
try:
sock, addr = self.socket.accept()
if self.sslctx:
sock = self.sslctx.wrap_socket(sock, server_side=True)
except OSError as e:
# socket errors are silenced by the caller, print them here
sys.stderr.write("Got an error:\n%s\n" % e)
raise
return sock, addr
class TestTCPServer(ControlMixin, ThreadingTCPServer):
"""
A TCP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a single
parameter - the request - in order to process the request.
:param poll_interval: The polling interval in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to call
                              :meth:`server_bind` and :meth:`server_activate`
                              at some later time before calling :meth:`start`,
                              so that the server will set up the socket and
                              listen on it.
"""
allow_reuse_address = True
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingTCPRequestHandler(StreamRequestHandler):
def handle(self):
self.server._handler(self)
ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
def server_bind(self):
super(TestTCPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
class TestUDPServer(ControlMixin, ThreadingUDPServer):
"""
A UDP server which is controllable using :class:`ControlMixin`.
:param addr: A tuple with the IP address and port to listen on.
:param handler: A handler callable which will be called with a
single parameter - the request - in order to
process the request.
:param poll_interval: The polling interval for shutdown requests,
in seconds.
    :param bind_and_activate: If True (the default), binds the server and
                              starts it listening. If False, you need to
                              call :meth:`server_bind` and
                              :meth:`server_activate` at some later time
                              before calling :meth:`start`, so that the
                              server will set up the socket and listen on it.
"""
def __init__(self, addr, handler, poll_interval=0.5,
bind_and_activate=True):
class DelegatingUDPRequestHandler(DatagramRequestHandler):
def handle(self):
self.server._handler(self)
def finish(self):
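                # Sending the queued reply can raise OSError if the server
                # socket has already been closed during shutdown; that error
                # is only suppressed once server_close() has run (see
                # self.server._closed below).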
data = self.wfile.getvalue()
if data:
try:
super(DelegatingUDPRequestHandler, self).finish()
except OSError:
if not self.server._closed:
raise
ThreadingUDPServer.__init__(self, addr,
DelegatingUDPRequestHandler,
bind_and_activate)
ControlMixin.__init__(self, handler, poll_interval)
self._closed = False
def server_bind(self):
super(TestUDPServer, self).server_bind()
self.port = self.socket.getsockname()[1]
def server_close(self):
super(TestUDPServer, self).server_close()
self._closed = True
if hasattr(socket, "AF_UNIX"):
class TestUnixStreamServer(TestTCPServer):
address_family = socket.AF_UNIX
class TestUnixDatagramServer(TestUDPServer):
address_family = socket.AF_UNIX
# - end of server_helper section
class SMTPHandlerTest(BaseTest):
# bpo-14314, bpo-19665, bpo-34092: don't wait forever, timeout of 1 minute
TIMEOUT = 60.0
def test_basic(self):
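        # Delivery is asynchronous: process_message() below is invoked on the
        # server's asyncore thread and sets self.handled, so the test waits
        # on that event before making any assertions.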
sockmap = {}
server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001,
sockmap)
server.start()
addr = (support.HOST, server.port)
h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log',
timeout=self.TIMEOUT)
self.assertEqual(h.toaddrs, ['you'])
self.messages = []
r = logging.makeLogRecord({'msg': 'Hello \u2713'})
self.handled = threading.Event()
h.handle(r)
self.handled.wait(self.TIMEOUT)
server.stop()
self.assertTrue(self.handled.is_set())
self.assertEqual(len(self.messages), 1)
peer, mailfrom, rcpttos, data = self.messages[0]
self.assertEqual(mailfrom, 'me')
self.assertEqual(rcpttos, ['you'])
self.assertIn('\nSubject: Log\n', data)
self.assertTrue(data.endswith('\n\nHello \u2713'))
h.close()
def process_message(self, *args):
self.messages.append(args)
self.handled.set()
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warning(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
def test_flush_on_close(self):
"""
Test that the flush-on-close configuration works as expected.
"""
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
self.mem_logger.removeHandler(self.mem_hdlr)
# Default behaviour is to flush on close. Check that it happens.
self.mem_hdlr.close()
lines = [
('DEBUG', '1'),
('INFO', '2'),
]
self.assert_log_lines(lines)
# Now configure for flushing not to be done on close.
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr,
False)
self.mem_logger.addHandler(self.mem_hdlr)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.info(self.next_message())
self.assert_log_lines(lines) # no change
self.mem_logger.removeHandler(self.mem_hdlr)
self.mem_hdlr.close()
# assert that no new lines have been added
self.assert_log_lines(lines) # no change
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger, and uses kwargs instead of args.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
kwargs={'stream': sys.stdout,}
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
    # config8 checks for a resource warning
config8 = r"""
[loggers]
keys=root
[handlers]
keys=file
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=file
[handler_file]
class=FileHandler
level=DEBUG
args=("{tempfile}",)
"""
disable_test = """
[loggers]
keys=root
[handlers]
keys=screen
[formatters]
keys=
[logger_root]
level=DEBUG
handlers=screen
[handler_screen]
level=DEBUG
class=StreamHandler
args=(sys.stdout,)
formatter=
"""
def apply_config(self, conf, **kwargs):
file = io.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file, **kwargs)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config0_using_cp_ok(self):
# A simple config file which overrides the default settings.
with support.captured_stdout() as output:
file = io.StringIO(textwrap.dedent(self.config0))
cp = configparser.ConfigParser()
cp.read_file(file)
logging.config.fileConfig(cp)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config8_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
# Replace single backslash with double backslash in windows
# to avoid unicode error during string formatting
if os.name == "nt":
fn = fn.replace("\\", "\\\\")
config8 = self.config8.format(tempfile=fn)
self.apply_config(config8)
self.apply_config(config8)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def test_logger_disabling(self):
self.apply_config(self.disable_test)
logger = logging.getLogger('some_pristine_logger')
self.assertFalse(logger.disabled)
self.apply_config(self.disable_test)
self.assertTrue(logger.disabled)
self.apply_config(self.disable_test, disable_existing_loggers=False)
self.assertFalse(logger.disabled)
def test_defaults_do_no_interpolation(self):
"""bpo-33802 defaults should not get interpolated"""
ini = textwrap.dedent("""
[formatters]
keys=default
[formatter_default]
[handlers]
keys=console
[handler_console]
class=logging.StreamHandler
args=tuple()
[loggers]
keys=root
[logger_root]
formatter=default
handlers=console
""").strip()
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.ini')
try:
os.write(fd, ini.encode('ascii'))
os.close(fd)
logging.config.fileConfig(
fn,
defaults=dict(
version=1,
disable_existing_loggers=False,
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter"
},
},
)
)
finally:
os.unlink(fn)
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
server_class = TestTCPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_socket, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SocketHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Semaphore(0)
def tearDown(self):
"""Shutdown the TCP server."""
try:
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
if self.server:
self.server.stop(2.0)
finally:
BaseTest.tearDown(self)
def handle_socket(self, request):
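        # Protocol note: SocketHandler frames each pickled LogRecord dict with
        # a 4-byte big-endian length prefix, so the reader below first
        # collects the prefix, then loops until the full payload has arrived.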
conn = request.connection
while True:
chunk = conn.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = conn.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.release()
def test_output(self):
# The log message sent to the SocketHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("tcp")
logger.error("spam")
self.handled.acquire()
logger.debug("eggs")
self.handled.acquire()
self.assertEqual(self.log_output, "spam\neggs\n")
def test_noserver(self):
if self.server_exception:
self.skipTest(self.server_exception)
# Avoid timing-related failures due to SocketHandler's own hard-wired
# one-second timeout on socket.create_connection() (issue #16264).
self.sock_hdlr.retryStart = 2.5
# Kill the server
self.server.stop(2.0)
# The logging call should try to connect, which should fail
try:
raise RuntimeError('Deliberate mistake')
except RuntimeError:
self.root_logger.exception('Never sent')
self.root_logger.error('Never sent, either')
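        # After a failed connection attempt, SocketHandler sets retryTime to
        # now + retryStart and backs off by retryFactor (capped at retryMax)
        # on each subsequent failure, so retryTime must lie in the future.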
now = time.time()
self.assertGreater(self.sock_hdlr.retryTime, now)
time.sleep(self.sock_hdlr.retryTime - now + 0.001)
self.root_logger.error('Nor this')
def _get_temp_domain_socket():
fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock')
os.close(fd)
# just need a name - file can't be present, or we'll get an
# 'address already in use' error.
os.remove(fn)
return fn
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSocketHandlerTest(SocketHandlerTest):
"""Test for SocketHandler with unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixStreamServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SocketHandlerTest.setUp(self)
def tearDown(self):
SocketHandlerTest.tearDown(self)
support.unlink(self.address)
class DatagramHandlerTest(BaseTest):
"""Test for DatagramHandler."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a DatagramHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sock_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.DatagramHandler
if isinstance(server.server_address, tuple):
self.sock_hdlr = hcls('localhost', server.port)
else:
self.sock_hdlr = hcls(server.server_address, None)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the UDP server."""
try:
if self.server:
self.server.stop(2.0)
if self.sock_hdlr:
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
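        # DatagramHandler uses the same 4-byte length prefix as SocketHandler;
        # since UDP preserves message boundaries, the prefix is simply skipped
        # here rather than used to read further data.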
slen = struct.pack('>L', 0) # length of prefix
packet = request.packet[len(slen):]
obj = pickle.loads(packet)
record = logging.makeLogRecord(obj)
self.log_output += record.msg + '\n'
self.handled.set()
def test_output(self):
# The log message sent to the DatagramHandler is properly received.
if self.server_exception:
self.skipTest(self.server_exception)
logger = logging.getLogger("udp")
logger.error("spam")
self.handled.wait()
self.handled.clear()
logger.error("eggs")
self.handled.wait()
self.assertEqual(self.log_output, "spam\neggs\n")
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixDatagramHandlerTest(DatagramHandlerTest):
"""Test for DatagramHandler using Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
DatagramHandlerTest.setUp(self)
def tearDown(self):
DatagramHandlerTest.tearDown(self)
support.unlink(self.address)
class SysLogHandlerTest(BaseTest):
"""Test for SysLogHandler using UDP."""
server_class = TestUDPServer
address = ('localhost', 0)
def setUp(self):
"""Set up a UDP server to receive log messages, and a SysLogHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
# Issue #29177: deal with errors that happen during setup
self.server = self.sl_hdlr = self.server_exception = None
try:
self.server = server = self.server_class(self.address,
self.handle_datagram, 0.01)
server.start()
# Uncomment next line to test error recovery in setUp()
# raise OSError('dummy error raised')
except OSError as e:
self.server_exception = e
return
server.ready.wait()
hcls = logging.handlers.SysLogHandler
if isinstance(server.server_address, tuple):
self.sl_hdlr = hcls((server.server_address[0], server.port))
else:
self.sl_hdlr = hcls(server.server_address)
self.log_output = ''
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sl_hdlr)
self.handled = threading.Event()
def tearDown(self):
"""Shutdown the server."""
try:
if self.server:
self.server.stop(2.0)
if self.sl_hdlr:
self.root_logger.removeHandler(self.sl_hdlr)
self.sl_hdlr.close()
finally:
BaseTest.tearDown(self)
def handle_datagram(self, request):
self.log_output = request.packet
self.handled.set()
def test_output(self):
if self.server_exception:
self.skipTest(self.server_exception)
# The log message sent to the SysLogHandler is properly received.
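        # The expected b'<11>' prefix is the syslog priority: facility
        # LOG_USER (1) * 8 + severity LOG_ERR (3) = 11. A trailing NUL byte
        # is appended while append_nul is True (the default).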
logger = logging.getLogger("slh")
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00')
self.handled.clear()
self.sl_hdlr.append_nul = False
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m')
self.handled.clear()
self.sl_hdlr.ident = "h\xe4m-"
logger.error("sp\xe4m")
self.handled.wait()
self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m')
@unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required")
class UnixSysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with Unix sockets."""
if hasattr(socket, "AF_UNIX"):
server_class = TestUnixDatagramServer
def setUp(self):
# override the definition in the base class
self.address = _get_temp_domain_socket()
SysLogHandlerTest.setUp(self)
def tearDown(self):
SysLogHandlerTest.tearDown(self)
support.unlink(self.address)
@unittest.skipUnless(support.IPV6_ENABLED,
'IPv6 support required for this test.')
class IPv6SysLogHandlerTest(SysLogHandlerTest):
"""Test for SysLogHandler with IPv6 host."""
server_class = TestUDPServer
address = ('::1', 0)
def setUp(self):
self.server_class.address_family = socket.AF_INET6
super(IPv6SysLogHandlerTest, self).setUp()
def tearDown(self):
self.server_class.address_family = socket.AF_INET
super(IPv6SysLogHandlerTest, self).tearDown()
class HTTPHandlerTest(BaseTest):
"""Test for HTTPHandler."""
def setUp(self):
"""Set up an HTTP server to receive log messages, and a HTTPHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.handled = threading.Event()
def handle_request(self, request):
self.command = request.command
self.log_data = urlparse(request.path)
if self.command == 'POST':
try:
rlen = int(request.headers['Content-Length'])
self.post_data = request.rfile.read(rlen)
            except Exception:
self.post_data = None
request.send_response(200)
request.end_headers()
self.handled.set()
def test_output(self):
# The log message sent to the HTTPHandler is properly received.
logger = logging.getLogger("http")
root_logger = self.root_logger
root_logger.removeHandler(self.root_logger.handlers[0])
for secure in (False, True):
addr = ('localhost', 0)
if secure:
try:
import ssl
except ImportError:
                    sslctx = None
                    context = None
else:
here = os.path.dirname(__file__)
localhost_cert = os.path.join(here, "keycert.pem")
sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslctx.load_cert_chain(localhost_cert)
context = ssl.create_default_context(cafile=localhost_cert)
else:
sslctx = None
context = None
self.server = server = TestHTTPServer(addr, self.handle_request,
0.01, sslctx=sslctx)
server.start()
server.ready.wait()
host = 'localhost:%d' % server.server_port
secure_client = secure and sslctx
self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob',
secure=secure_client,
context=context,
credentials=('foo', 'bar'))
self.log_data = None
root_logger.addHandler(self.h_hdlr)
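            # HTTPHandler url-encodes the record's attribute dict; with GET
            # the data travels in the query string, with POST in the request
            # body, which is why the parsing below differs per method.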
for method in ('GET', 'POST'):
self.h_hdlr.method = method
self.handled.clear()
msg = "sp\xe4m"
logger.error(msg)
self.handled.wait()
self.assertEqual(self.log_data.path, '/frob')
self.assertEqual(self.command, method)
if method == 'GET':
d = parse_qs(self.log_data.query)
else:
d = parse_qs(self.post_data.decode('utf-8'))
self.assertEqual(d['name'], ['http'])
self.assertEqual(d['funcName'], ['test_output'])
self.assertEqual(d['msg'], [msg])
self.server.stop(2.0)
self.root_logger.removeHandler(self.h_hdlr)
self.h_hdlr.close()
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
        # Non-ASCII data written through a FileHandler opened with an
        # explicit encoding should round-trip unchanged.
log = logging.getLogger("test")
fd, fn = tempfile.mkstemp(".log", "test_logging-1-")
os.close(fd)
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf-8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf-8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
# Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
# Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
# Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
warnings.filterwarnings("always", category=UserWarning)
stream = io.StringIO()
h = logging.StreamHandler(stream)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = stream.getvalue()
h.close()
self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0)
# See if an explicit file uses the original implementation
a_file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
a_file, "Dummy line")
s = a_file.getvalue()
a_file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
def test_warnings_no_handlers(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
self.addCleanup(logging.captureWarnings, False)
# confirm our assumption: no loggers are set
logger = logging.getLogger("py.warnings")
self.assertEqual(logger.handlers, [])
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42)
self.assertEqual(len(logger.handlers), 1)
self.assertIsInstance(logger.handlers[0], logging.NullHandler)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
check_no_resource_warning = support.check_no_resource_warning
expected_log_pat = r"^(\w+) \+\+ (\w+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config1a moves the handler to the root. Used with config8a
config1a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
    # config7 does not define compiler.parser but defines compiler.lexer,
    # so compiler.parser should be disabled after applying it.
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8 defines both compiler and compiler.lexer
# so compiler.parser should not be disabled (since
# compiler is defined)
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
# config8a disables existing loggers
config8a = {
'version': 1,
'disable_existing_loggers' : True,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
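    # config9a and config9b are incremental configs ('incremental': True):
    # existing handlers and loggers are kept, and only handler levels plus
    # logger levels/propagation can be changed.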
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
# As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# As config0, but with properties
config14 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'.': {
'foo': 'bar',
'terminator': '!\n',
}
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
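    # out_of_order exercises deferred reference resolution: handler names are
    # processed in sorted order, so 'bufferGlobal' is configured before its
    # 'fileGlobal' target exists and dictConfig must defer the lookup. The
    # '$' style selects string.Template-based formatting, checked in
    # test_out_of_order below.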
out_of_order = {
"version": 1,
"formatters": {
"mySimpleFormatter": {
"format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s",
"style": "$"
}
},
"handlers": {
"fileGlobal": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "mySimpleFormatter"
},
"bufferGlobal": {
"class": "logging.handlers.MemoryHandler",
"capacity": 5,
"formatter": "mySimpleFormatter",
"target": "fileGlobal",
"level": "DEBUG"
}
},
"loggers": {
"mymodule": {
"level": "DEBUG",
"handlers": ["bufferGlobal"],
"propagate": "true"
}
}
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with support.captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with support.captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4)
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with support.captured_stdout() as output:
self.apply_config(self.config4a)
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(Exception, self.apply_config, self.config6)
def test_config7_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    # Same as test_config7_ok but don't disable old loggers.
def test_config_8_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_8a_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with support.captured_stdout() as output:
self.apply_config(self.config8a)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
# Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
# Nothing will be output since handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
# Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(Exception, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(Exception, self.apply_config, self.config13)
def test_config14_ok(self):
with support.captured_stdout() as output:
self.apply_config(self.config14)
h = logging._handlers['hand1']
self.assertEqual(h.foo, 'bar')
self.assertEqual(h.terminator, '!\n')
logging.warning('Exclamation')
self.assertTrue(output.getvalue().endswith('Exclamation!\n'))
def test_config15_ok(self):
def cleanup(h1, fn):
h1.close()
os.remove(fn)
with self.check_no_resource_warning():
fd, fn = tempfile.mkstemp(".log", "test_logging-X-")
os.close(fd)
config = {
"version": 1,
"handlers": {
"file": {
"class": "logging.FileHandler",
"filename": fn
}
},
"root": {
"handlers": ["file"]
}
}
self.apply_config(config)
self.apply_config(config)
handler = logging.root.handlers[0]
self.addCleanup(cleanup, handler, fn)
def setup_via_listener(self, text, verify=None):
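        # logging.config.listen() expects the same framing as SocketHandler:
        # a 4-byte big-endian length prefix followed by the configuration
        # bytes. The optional verify callable may inspect and/or transform
        # the received bytes, or return None to reject them.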
text = text.encode("utf-8")
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0, verify)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
support.join_thread(t, 2.0)
def test_listen_config_10_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
# Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
# Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with support.captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_listen_verify(self):
def verify_fail(stuff):
return None
def verify_reverse(stuff):
return stuff[::-1]
logger = logging.getLogger("compiler.parser")
to_send = textwrap.dedent(ConfigFileTest.config1)
# First, specify a verification function that will fail.
# We expect to see no output, since our configuration
# never took effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send, verify_fail)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([], stream=output)
# Original logger output has the stuff we logged.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform no verification. Our configuration
# should take effect.
with support.captured_stdout() as output:
self.setup_via_listener(to_send) # no verify callable specified
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
# Now, perform verification which transforms the bytes.
with support.captured_stdout() as output:
self.setup_via_listener(to_send[::-1], verify_reverse)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output still has the stuff we logged before.
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], pat=r"^[\w.]+ -> (\w+): (\d+)$")
def test_out_of_order(self):
self.apply_config(self.out_of_order)
handler = logging.getLogger('mymodule').handlers[0]
self.assertIsInstance(handler.target, logging.Handler)
self.assertIsInstance(handler.formatter._style,
logging.StringTemplateStyle)
def test_baseconfig(self):
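        # cfg:// references resolve against the configuration dict itself:
        # dots or [key] traverse mappings, and [n] with an integer indexes
        # sequences, as exercised below.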
d = {
'atuple': (1, 2, 3),
'alist': ['a', 'b', 'c'],
'adict': {'d': 'e', 'f': 3 },
'nest1': ('g', ('h', 'i'), 'j'),
'nest2': ['k', ['l', 'm'], 'n'],
'nest3': ['o', 'cfg://alist', 'p'],
}
bc = logging.config.BaseConfigurator(d)
self.assertEqual(bc.convert('cfg://atuple[1]'), 2)
self.assertEqual(bc.convert('cfg://alist[1]'), 'b')
self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h')
self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm')
self.assertEqual(bc.convert('cfg://adict.d'), 'e')
self.assertEqual(bc.convert('cfg://adict[f]'), 3)
v = bc.convert('cfg://nest3')
self.assertEqual(v.pop(1), ['a', 'b', 'c'])
self.assertRaises(KeyError, bc.convert, 'cfg://nosuch')
self.assertRaises(ValueError, bc.convert, 'cfg://!')
self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]')
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
def test_set_log_record_factory(self):
man = logging.Manager(None)
expected = object()
man.setLogRecordFactory(expected)
self.assertEqual(man.logRecordFactory, expected)
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertIs(c1, logging.getLogger('xyz'))
self.assertIs(c2, logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertIs(c1, logging.getLogger('abc.def'))
self.assertIs(c2, logging.getLogger('abc.def.ghi'))
self.assertIs(c2, c3)
class DerivedLogRecord(logging.LogRecord):
pass
class LogRecordFactoryTest(BaseTest):
def setUp(self):
class CheckingFilter(logging.Filter):
def __init__(self, cls):
self.cls = cls
def filter(self, record):
t = type(record)
if t is not self.cls:
msg = 'Unexpected LogRecord type %s, expected %s' % (t,
self.cls)
raise TypeError(msg)
return True
BaseTest.setUp(self)
self.filter = CheckingFilter(DerivedLogRecord)
self.root_logger.addFilter(self.filter)
self.orig_factory = logging.getLogRecordFactory()
def tearDown(self):
self.root_logger.removeFilter(self.filter)
BaseTest.tearDown(self)
logging.setLogRecordFactory(self.orig_factory)
def test_logrecord_class(self):
self.assertRaises(TypeError, self.root_logger.warning,
self.next_message())
logging.setLogRecordFactory(DerivedLogRecord)
self.root_logger.error(self.next_message())
self.assert_log_lines([
('root', 'ERROR', '2'),
])
class QueueHandlerTest(BaseTest):
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"
def setUp(self):
BaseTest.setUp(self)
self.queue = queue.Queue(-1)
self.que_hdlr = logging.handlers.QueueHandler(self.queue)
self.name = 'que'
self.que_logger = logging.getLogger('que')
self.que_logger.propagate = False
self.que_logger.setLevel(logging.WARNING)
self.que_logger.addHandler(self.que_hdlr)
def tearDown(self):
self.que_hdlr.close()
BaseTest.tearDown(self)
def test_queue_handler(self):
self.que_logger.debug(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
self.que_logger.info(self.next_message())
self.assertRaises(queue.Empty, self.queue.get_nowait)
msg = self.next_message()
self.que_logger.warning(msg)
data = self.queue.get_nowait()
self.assertTrue(isinstance(data, logging.LogRecord))
self.assertEqual(data.name, self.que_logger.name)
self.assertEqual((data.msg, data.args), (msg, None))
def test_formatting(self):
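        # QueueHandler.prepare() formats the record before enqueueing, so the
        # dequeued record's msg and message attributes both hold the fully
        # formatted string (with args already merged in).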
msg = self.next_message()
levelname = logging.getLevelName(logging.WARNING)
log_format_str = '{name} -> {levelname}: {message}'
formatted_msg = log_format_str.format(name=self.name,
levelname=levelname, message=msg)
formatter = logging.Formatter(self.log_format)
self.que_hdlr.setFormatter(formatter)
self.que_logger.warning(msg)
log_record = self.queue.get_nowait()
self.assertEqual(formatted_msg, log_record.msg)
self.assertEqual(formatted_msg, log_record.message)
@unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'),
'logging.handlers.QueueListener required for this test')
def test_queue_listener(self):
handler = support.TestHandler(support.Matcher())
listener = logging.handlers.QueueListener(self.queue, handler)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertTrue(handler.matches(levelno=logging.WARNING, message='1'))
self.assertTrue(handler.matches(levelno=logging.ERROR, message='2'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='3'))
handler.close()
# Now test with respect_handler_level set
handler = support.TestHandler(support.Matcher())
handler.setLevel(logging.CRITICAL)
listener = logging.handlers.QueueListener(self.queue, handler,
respect_handler_level=True)
listener.start()
try:
self.que_logger.warning(self.next_message())
self.que_logger.error(self.next_message())
self.que_logger.critical(self.next_message())
finally:
listener.stop()
self.assertFalse(handler.matches(levelno=logging.WARNING, message='4'))
self.assertFalse(handler.matches(levelno=logging.ERROR, message='5'))
self.assertTrue(handler.matches(levelno=logging.CRITICAL, message='6'))
if hasattr(logging.handlers, 'QueueListener'):
import multiprocessing
from unittest.mock import patch
class QueueListenerTest(BaseTest):
"""
Tests based on patch submitted for issue #27930. Ensure that
QueueListener handles all log messages.
"""
repeat = 20
@staticmethod
def setup_and_log(log_queue, ident):
"""
Creates a logger with a QueueHandler that logs to a queue read by a
QueueListener. Starts the listener, logs five messages, and stops
the listener.
"""
logger = logging.getLogger('test_logger_with_id_%s' % ident)
logger.setLevel(logging.DEBUG)
handler = logging.handlers.QueueHandler(log_queue)
logger.addHandler(handler)
listener = logging.handlers.QueueListener(log_queue)
listener.start()
logger.info('one')
logger.info('two')
logger.info('three')
logger.info('four')
logger.info('five')
listener.stop()
logger.removeHandler(handler)
handler.close()
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_queue_queue(self, mock_handle):
for i in range(self.repeat):
log_queue = queue.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@patch.object(logging.handlers.QueueListener, 'handle')
def test_handle_called_with_mp_queue(self, mock_handle):
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
log_queue = multiprocessing.Queue()
self.setup_and_log(log_queue, '%s_%s' % (self.id(), i))
log_queue.close()
log_queue.join_thread()
self.assertEqual(mock_handle.call_count, 5 * self.repeat,
'correct number of handled log messages')
@staticmethod
def get_all_from_queue(log_queue):
try:
while True:
yield log_queue.get_nowait()
except queue.Empty:
return []
def test_no_messages_in_queue_after_stop(self):
"""
Five messages are logged then the QueueListener is stopped. This
test then gets everything off the queue. Failure of this test
indicates that messages were not registered on the queue until
_after_ the QueueListener stopped.
"""
# Issue 28668: The multiprocessing (mp) module is not functional
# when the mp.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
for i in range(self.repeat):
queue = multiprocessing.Queue()
            self.setup_and_log(queue, '%s_%s' % (self.id(), i))
# time.sleep(1)
items = list(self.get_all_from_queue(queue))
queue.close()
queue.join_thread()
expected = [[], [logging.handlers.QueueListener._sentinel]]
self.assertIn(items, expected,
'Found unexpected messages in queue: %s' % (
[m.msg if isinstance(m, logging.LogRecord)
else m for m in items]))
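# Minimal fixed-offset tzinfo used by FormatterTest.test_time below;
# functionally equivalent to datetime.timezone.utc.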
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
dst = utcoffset
def tzname(self, dt):
return 'UTC'
utc = UTC()
class FormatterTest(unittest.TestCase):
def setUp(self):
self.common = {
'name': 'formatter.test',
'level': logging.DEBUG,
'pathname': os.path.join('path', 'to', 'dummy.ext'),
'lineno': 42,
'exc_info': None,
'func': None,
'msg': 'Message with %d %s',
'args': (2, 'placeholders'),
}
self.variants = {
}
def get_record(self, name=None):
result = dict(self.common)
if name is not None:
result.update(self.variants[name])
return logging.makeLogRecord(result)
def test_percent(self):
# Test %-formatting
r = self.get_record()
f = logging.Formatter('${%(message)s}')
self.assertEqual(f.format(r), '${Message with 2 placeholders}')
f = logging.Formatter('%(random)s')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('%(asctime)s')
self.assertTrue(f.usesTime())
f = logging.Formatter('%(asctime)-15s')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime')
self.assertFalse(f.usesTime())
def test_braces(self):
# Test {}-formatting
r = self.get_record()
f = logging.Formatter('$%{message}%$', style='{')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('{random}', style='{')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('{asctime}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime!s:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('{asctime:15}', style='{')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='{')
self.assertFalse(f.usesTime())
def test_dollars(self):
# Test $-formatting
r = self.get_record()
f = logging.Formatter('$message', style='$')
self.assertEqual(f.format(r), 'Message with 2 placeholders')
f = logging.Formatter('$$%${message}%$$', style='$')
self.assertEqual(f.format(r), '$%Message with 2 placeholders%$')
f = logging.Formatter('${random}', style='$')
self.assertRaises(KeyError, f.format, r)
self.assertFalse(f.usesTime())
f = logging.Formatter('${asctime}', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('${asctime', style='$')
self.assertFalse(f.usesTime())
f = logging.Formatter('$asctime', style='$')
self.assertTrue(f.usesTime())
f = logging.Formatter('asctime', style='$')
self.assertFalse(f.usesTime())
def test_invalid_style(self):
self.assertRaises(ValueError, logging.Formatter, None, None, 'x')
def test_time(self):
r = self.get_record()
dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc)
# We use None to indicate we want the local timezone
# We're essentially converting a UTC time to local time
r.created = time.mktime(dt.astimezone(None).timetuple())
r.msecs = 123
f = logging.Formatter('%(asctime)s %(message)s')
f.converter = time.gmtime
self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123')
self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21')
f.format(r)
self.assertEqual(r.asctime, '1993-04-21 08:03:00,123')
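# BufferingFormatter formats a whole batch of records at once; formatHeader()
# and formatFooter() are the hooks the subclass below overrides.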
class TestBufferingFormatter(logging.BufferingFormatter):
def formatHeader(self, records):
return '[(%d)' % len(records)
def formatFooter(self, records):
return '(%d)]' % len(records)
class BufferingFormatterTest(unittest.TestCase):
def setUp(self):
self.records = [
logging.makeLogRecord({'msg': 'one'}),
logging.makeLogRecord({'msg': 'two'}),
]
def test_default(self):
f = logging.BufferingFormatter()
self.assertEqual('', f.format([]))
self.assertEqual('onetwo', f.format(self.records))
def test_custom(self):
f = TestBufferingFormatter()
self.assertEqual('[(2)onetwo(2)]', f.format(self.records))
lf = logging.Formatter('<%(message)s>')
f = TestBufferingFormatter(lf)
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records))
class ExceptionTest(BaseTest):
def test_formatting(self):
r = self.root_logger
h = RecordingHandler()
r.addHandler(h)
try:
raise RuntimeError('deliberate mistake')
except:
logging.exception('failed', stack_info=True)
r.removeHandler(h)
h.close()
r = h.records[0]
self.assertTrue(r.exc_text.startswith('Traceback (most recent '
'call last):\n'))
self.assertTrue(r.exc_text.endswith('\nRuntimeError: '
'deliberate mistake'))
self.assertTrue(r.stack_info.startswith('Stack (most recent '
'call last):\n'))
self.assertTrue(r.stack_info.endswith('logging.exception(\'failed\', '
'stack_info=True)'))
class LastResortTest(BaseTest):
def test_last_resort(self):
# Test the last resort handler
root = self.root_logger
root.removeHandler(self.root_hdlr)
old_lastresort = logging.lastResort
old_raise_exceptions = logging.raiseExceptions
try:
with support.captured_stderr() as stderr:
root.debug('This should not appear')
self.assertEqual(stderr.getvalue(), '')
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), 'Final chance!\n')
# No handlers and no last resort, so 'No handlers' message
logging.lastResort = None
with support.captured_stderr() as stderr:
root.warning('Final chance!')
msg = 'No handlers could be found for logger "root"\n'
self.assertEqual(stderr.getvalue(), msg)
# 'No handlers' message only printed once
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
# If raiseExceptions is False, no message is printed
root.manager.emittedNoHandlerWarning = False
logging.raiseExceptions = False
with support.captured_stderr() as stderr:
root.warning('Final chance!')
self.assertEqual(stderr.getvalue(), '')
finally:
root.addHandler(self.root_hdlr)
logging.lastResort = old_lastresort
logging.raiseExceptions = old_raise_exceptions
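# Helpers used by the tests below: FakeHandler records the order in which
# acquire/flush/close/release are called, and RecordingHandler keeps every
# record it handles.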
class FakeHandler:
def __init__(self, identifier, called):
for method in ('acquire', 'flush', 'close', 'release'):
setattr(self, method, self.record_call(identifier, method, called))
def record_call(self, identifier, method_name, called):
def inner():
called.append('{} - {}'.format(identifier, method_name))
return inner
class RecordingHandler(logging.NullHandler):
def __init__(self, *args, **kwargs):
super(RecordingHandler, self).__init__(*args, **kwargs)
self.records = []
def handle(self, record):
"""Keep track of all the emitted records."""
self.records.append(record)
class ShutdownTest(BaseTest):
"""Test suite for the shutdown method."""
def setUp(self):
super(ShutdownTest, self).setUp()
self.called = []
raise_exceptions = logging.raiseExceptions
self.addCleanup(setattr, logging, 'raiseExceptions', raise_exceptions)
def raise_error(self, error):
def inner():
raise error()
return inner
def test_no_failure(self):
# create some fake handlers
handler0 = FakeHandler(0, self.called)
handler1 = FakeHandler(1, self.called)
handler2 = FakeHandler(2, self.called)
# create live weakref to those handlers
handlers = map(logging.weakref.ref, [handler0, handler1, handler2])
logging.shutdown(handlerList=list(handlers))
expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release',
'1 - acquire', '1 - flush', '1 - close', '1 - release',
'0 - acquire', '0 - flush', '0 - close', '0 - release']
self.assertEqual(expected, self.called)
def _test_with_failure_in_method(self, method, error):
handler = FakeHandler(0, self.called)
setattr(handler, method, self.raise_error(error))
handlers = [logging.weakref.ref(handler)]
logging.shutdown(handlerList=list(handlers))
self.assertEqual('0 - release', self.called[-1])
def test_with_ioerror_in_acquire(self):
self._test_with_failure_in_method('acquire', OSError)
def test_with_ioerror_in_flush(self):
self._test_with_failure_in_method('flush', OSError)
def test_with_ioerror_in_close(self):
self._test_with_failure_in_method('close', OSError)
def test_with_valueerror_in_acquire(self):
self._test_with_failure_in_method('acquire', ValueError)
def test_with_valueerror_in_flush(self):
self._test_with_failure_in_method('flush', ValueError)
def test_with_valueerror_in_close(self):
self._test_with_failure_in_method('close', ValueError)
def test_with_other_error_in_acquire_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('acquire', IndexError)
def test_with_other_error_in_flush_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('flush', IndexError)
def test_with_other_error_in_close_without_raise(self):
logging.raiseExceptions = False
self._test_with_failure_in_method('close', IndexError)
def test_with_other_error_in_acquire_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'acquire', IndexError)
def test_with_other_error_in_flush_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'flush', IndexError)
def test_with_other_error_in_close_with_raise(self):
logging.raiseExceptions = True
self.assertRaises(IndexError, self._test_with_failure_in_method,
'close', IndexError)
class ModuleLevelMiscTest(BaseTest):
"""Test suite for some module level methods."""
def test_disable(self):
old_disable = logging.root.manager.disable
# confirm our assumptions are correct
self.assertEqual(old_disable, 0)
self.addCleanup(logging.disable, old_disable)
logging.disable(83)
self.assertEqual(logging.root.manager.disable, 83)
# test the default value introduced in 3.7
# (Issue #28524)
logging.disable()
self.assertEqual(logging.root.manager.disable, logging.CRITICAL)
def _test_log(self, method, level=None):
called = []
support.patch(self, logging, 'basicConfig',
lambda *a, **kw: called.append((a, kw)))
recording = RecordingHandler()
logging.root.addHandler(recording)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me: %r", recording)
else:
log_method("test me: %r", recording)
self.assertEqual(len(recording.records), 1)
record = recording.records[0]
self.assertEqual(record.getMessage(), "test me: %r" % recording)
expected_level = level if level is not None else getattr(logging, method.upper())
self.assertEqual(record.levelno, expected_level)
# basicConfig was not called!
self.assertEqual(called, [])
def test_log(self):
self._test_log('log', logging.ERROR)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
def test_set_logger_class(self):
self.assertRaises(TypeError, logging.setLoggerClass, object)
class MyLogger(logging.Logger):
pass
logging.setLoggerClass(MyLogger)
self.assertEqual(logging.getLoggerClass(), MyLogger)
logging.setLoggerClass(logging.Logger)
self.assertEqual(logging.getLoggerClass(), logging.Logger)
@support.requires_type_collecting
def test_logging_at_shutdown(self):
# Issue #20037
code = """if 1:
import logging
class A:
def __del__(self):
try:
raise ValueError("some error")
except Exception:
logging.exception("exception in __del__")
a = A()"""
rc, out, err = assert_python_ok("-c", code)
err = err.decode()
self.assertIn("exception in __del__", err)
self.assertIn("ValueError: some error", err)
class LogRecordTest(BaseTest):
def test_str_rep(self):
r = logging.makeLogRecord({})
s = str(r)
self.assertTrue(s.startswith('<LogRecord: '))
self.assertTrue(s.endswith('>'))
def test_dict_arg(self):
h = RecordingHandler()
r = logging.getLogger()
r.addHandler(h)
d = {'less' : 'more' }
logging.warning('less is %(less)s', d)
self.assertIs(h.records[0].args, d)
self.assertEqual(h.records[0].message, 'less is more')
r.removeHandler(h)
h.close()
def test_multiprocessing(self):
r = logging.makeLogRecord({})
self.assertEqual(r.processName, 'MainProcess')
try:
import multiprocessing as mp
r = logging.makeLogRecord({})
self.assertEqual(r.processName, mp.current_process().name)
except ImportError:
pass
def test_optional(self):
r = logging.makeLogRecord({})
NOT_NONE = self.assertIsNotNone
NOT_NONE(r.thread)
NOT_NONE(r.threadName)
NOT_NONE(r.process)
NOT_NONE(r.processName)
log_threads = logging.logThreads
log_processes = logging.logProcesses
log_multiprocessing = logging.logMultiprocessing
try:
logging.logThreads = False
logging.logProcesses = False
logging.logMultiprocessing = False
r = logging.makeLogRecord({})
NONE = self.assertIsNone
NONE(r.thread)
NONE(r.threadName)
NONE(r.process)
NONE(r.processName)
finally:
logging.logThreads = log_threads
logging.logProcesses = log_processes
logging.logMultiprocessing = log_multiprocessing
class BasicConfigTest(unittest.TestCase):
"""Test suite for logging.basicConfig."""
def setUp(self):
super(BasicConfigTest, self).setUp()
self.handlers = logging.root.handlers
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.original_logging_level = logging.root.level
self.addCleanup(self.cleanup)
logging.root.handlers = []
def tearDown(self):
for h in logging.root.handlers[:]:
logging.root.removeHandler(h)
h.close()
super(BasicConfigTest, self).tearDown()
def cleanup(self):
setattr(logging.root, 'handlers', self.handlers)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
logging.root.level = self.original_logging_level
def test_no_kwargs(self):
logging.basicConfig()
# handler defaults to a StreamHandler to sys.stderr
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, sys.stderr)
formatter = handler.formatter
# format defaults to logging.BASIC_FORMAT
self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT)
# datefmt defaults to None
self.assertIsNone(formatter.datefmt)
# style defaults to %
self.assertIsInstance(formatter._style, logging.PercentStyle)
# level is not explicitly set
self.assertEqual(logging.root.level, self.original_logging_level)
def test_strformatstyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="{")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_stringtemplatestyle(self):
with support.captured_stdout() as output:
logging.basicConfig(stream=sys.stdout, style="$")
logging.error("Log an error")
sys.stdout.seek(0)
self.assertEqual(output.getvalue().strip(),
"ERROR:root:Log an error")
def test_filename(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log')
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.FileHandler)
expected = logging.FileHandler('test.log', 'a')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.assertEqual(handler.stream.name, expected.stream.name)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_filemode(self):
def cleanup(h1, h2, fn):
h1.close()
h2.close()
os.remove(fn)
logging.basicConfig(filename='test.log', filemode='wb')
handler = logging.root.handlers[0]
expected = logging.FileHandler('test.log', 'wb')
self.assertEqual(handler.stream.mode, expected.stream.mode)
self.addCleanup(cleanup, handler, expected, 'test.log')
def test_stream(self):
stream = io.StringIO()
self.addCleanup(stream.close)
logging.basicConfig(stream=stream)
self.assertEqual(len(logging.root.handlers), 1)
handler = logging.root.handlers[0]
self.assertIsInstance(handler, logging.StreamHandler)
self.assertEqual(handler.stream, stream)
def test_format(self):
logging.basicConfig(format='foo')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter._style._fmt, 'foo')
def test_datefmt(self):
logging.basicConfig(datefmt='bar')
formatter = logging.root.handlers[0].formatter
self.assertEqual(formatter.datefmt, 'bar')
def test_style(self):
logging.basicConfig(style='$')
formatter = logging.root.handlers[0].formatter
self.assertIsInstance(formatter._style, logging.StringTemplateStyle)
def test_level(self):
old_level = logging.root.level
self.addCleanup(logging.root.setLevel, old_level)
logging.basicConfig(level=57)
self.assertEqual(logging.root.level, 57)
# Test that second call has no effect
logging.basicConfig(level=58)
self.assertEqual(logging.root.level, 57)
def test_incompatible(self):
assertRaises = self.assertRaises
handlers = [logging.StreamHandler()]
stream = sys.stderr
assertRaises(ValueError, logging.basicConfig, filename='test.log',
stream=stream)
assertRaises(ValueError, logging.basicConfig, filename='test.log',
handlers=handlers)
assertRaises(ValueError, logging.basicConfig, stream=stream,
handlers=handlers)
# Issue 23207: test for invalid kwargs
assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO)
# Should pop both filename and filemode even if filename is None
logging.basicConfig(filename=None, filemode='a')
def test_handlers(self):
handlers = [
logging.StreamHandler(),
logging.StreamHandler(sys.stdout),
logging.StreamHandler(),
]
f = logging.Formatter()
handlers[2].setFormatter(f)
logging.basicConfig(handlers=handlers)
self.assertIs(handlers[0], logging.root.handlers[0])
self.assertIs(handlers[1], logging.root.handlers[1])
self.assertIs(handlers[2], logging.root.handlers[2])
self.assertIsNotNone(handlers[0].formatter)
self.assertIsNotNone(handlers[1].formatter)
self.assertIs(handlers[2].formatter, f)
self.assertIs(handlers[0].formatter, handlers[1].formatter)
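    # basicConfig(force=True), added in Python 3.8, first removes and closes any
    # handlers already attached to the root logger, so the reconfiguration below
    # takes effect where a plain second call would be a no-op.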
def test_force(self):
old_string_io = io.StringIO()
new_string_io = io.StringIO()
old_handlers = [logging.StreamHandler(old_string_io)]
new_handlers = [logging.StreamHandler(new_string_io)]
logging.basicConfig(level=logging.WARNING, handlers=old_handlers)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
logging.basicConfig(level=logging.INFO, handlers=new_handlers,
force=True)
logging.warning('warn')
logging.info('info')
logging.debug('debug')
self.assertEqual(len(logging.root.handlers), 1)
self.assertEqual(old_string_io.getvalue().strip(),
'WARNING:root:warn')
self.assertEqual(new_string_io.getvalue().strip(),
'WARNING:root:warn\nINFO:root:info')
def _test_log(self, method, level=None):
# logging.root has no handlers so basicConfig should be called
called = []
old_basic_config = logging.basicConfig
def my_basic_config(*a, **kw):
old_basic_config()
old_level = logging.root.level
logging.root.setLevel(100) # avoid having messages in stderr
self.addCleanup(logging.root.setLevel, old_level)
called.append((a, kw))
support.patch(self, logging, 'basicConfig', my_basic_config)
log_method = getattr(logging, method)
if level is not None:
log_method(level, "test me")
else:
log_method("test me")
# basicConfig was called with no arguments
self.assertEqual(called, [((), {})])
def test_log(self):
self._test_log('log', logging.WARNING)
def test_debug(self):
self._test_log('debug')
def test_info(self):
self._test_log('info')
def test_warning(self):
self._test_log('warning')
def test_error(self):
self._test_log('error')
def test_critical(self):
self._test_log('critical')
class LoggerAdapterTest(unittest.TestCase):
def setUp(self):
super(LoggerAdapterTest, self).setUp()
old_handler_list = logging._handlerList[:]
self.recording = RecordingHandler()
self.logger = logging.root
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
def cleanup():
logging._handlerList[:] = old_handler_list
self.addCleanup(cleanup)
self.addCleanup(logging.shutdown)
self.adapter = logging.LoggerAdapter(logger=self.logger, extra=None)
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_exception_excinfo(self):
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.adapter.exception('exc_info test', exc_info=exc)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_critical(self):
msg = 'critical test! %r'
self.adapter.critical(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
def test_is_enabled_for(self):
old_disable = self.adapter.logger.manager.disable
self.adapter.logger.manager.disable = 33
self.addCleanup(setattr, self.adapter.logger.manager, 'disable',
old_disable)
self.assertFalse(self.adapter.isEnabledFor(32))
def test_has_handlers(self):
self.assertTrue(self.adapter.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
self.assertFalse(self.adapter.hasHandlers())
def test_nested(self):
class Adapter(logging.LoggerAdapter):
prefix = 'Adapter'
def process(self, msg, kwargs):
return f"{self.prefix} {msg}", kwargs
msg = 'Adapters can be nested, yo.'
adapter = Adapter(logger=self.logger, extra=None)
adapter_adapter = Adapter(logger=adapter, extra=None)
adapter_adapter.prefix = 'AdapterAdapter'
self.assertEqual(repr(adapter), repr(adapter_adapter))
adapter_adapter.log(logging.CRITICAL, msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.CRITICAL)
self.assertEqual(record.msg, f"Adapter AdapterAdapter {msg}")
self.assertEqual(record.args, (self.recording,))
orig_manager = adapter_adapter.manager
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
temp_manager = object()
try:
adapter_adapter.manager = temp_manager
self.assertIs(adapter_adapter.manager, temp_manager)
self.assertIs(adapter.manager, temp_manager)
self.assertIs(self.logger.manager, temp_manager)
finally:
adapter_adapter.manager = orig_manager
self.assertIs(adapter_adapter.manager, orig_manager)
self.assertIs(adapter.manager, orig_manager)
self.assertIs(self.logger.manager, orig_manager)
class LoggerTest(BaseTest):
def setUp(self):
super(LoggerTest, self).setUp()
self.recording = RecordingHandler()
self.logger = logging.Logger(name='blah')
self.logger.addHandler(self.recording)
self.addCleanup(self.logger.removeHandler, self.recording)
self.addCleanup(self.recording.close)
self.addCleanup(logging.shutdown)
def test_set_invalid_level(self):
self.assertRaises(TypeError, self.logger.setLevel, object())
def test_exception(self):
msg = 'testing exception: %r'
exc = None
try:
1 / 0
except ZeroDivisionError as e:
exc = e
self.logger.exception(msg, self.recording)
self.assertEqual(len(self.recording.records), 1)
record = self.recording.records[0]
self.assertEqual(record.levelno, logging.ERROR)
self.assertEqual(record.msg, msg)
self.assertEqual(record.args, (self.recording,))
self.assertEqual(record.exc_info,
(exc.__class__, exc, exc.__traceback__))
def test_log_invalid_level_with_raise(self):
with support.swap_attr(logging, 'raiseExceptions', True):
self.assertRaises(TypeError, self.logger.log, '10', 'test message')
def test_log_invalid_level_no_raise(self):
with support.swap_attr(logging, 'raiseExceptions', False):
self.logger.log('10', 'test message') # no exception happens
def test_find_caller_with_stack_info(self):
called = []
support.patch(self, logging.traceback, 'print_stack',
lambda f, file: called.append(file.getvalue()))
self.logger.findCaller(stack_info=True)
self.assertEqual(len(called), 1)
self.assertEqual('Stack (most recent call last):\n', called[0])
def test_find_caller_with_stacklevel(self):
the_level = 1
def innermost():
self.logger.warning('test', stacklevel=the_level)
def inner():
innermost()
def outer():
inner()
records = self.recording.records
outer()
self.assertEqual(records[-1].funcName, 'innermost')
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'inner')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'outer')
self.assertGreater(records[-1].lineno, lineno)
lineno = records[-1].lineno
the_level += 1
outer()
self.assertEqual(records[-1].funcName, 'test_find_caller_with_stacklevel')
self.assertGreater(records[-1].lineno, lineno)
def test_make_record_with_extra_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
rv = logging._logRecordFactory(name, level, fn, lno, msg, args,
exc_info, func, sinfo)
for key in ('message', 'asctime') + tuple(rv.__dict__.keys()):
extra = {key: 'some value'}
self.assertRaises(KeyError, self.logger.makeRecord, name, level,
fn, lno, msg, args, exc_info,
extra=extra, sinfo=sinfo)
def test_make_record_with_extra_no_overwrite(self):
name = 'my record'
level = 13
fn = lno = msg = args = exc_info = func = sinfo = None
extra = {'valid_key': 'some value'}
result = self.logger.makeRecord(name, level, fn, lno, msg, args,
exc_info, extra=extra, sinfo=sinfo)
self.assertIn('valid_key', result.__dict__)
def test_has_handlers(self):
self.assertTrue(self.logger.hasHandlers())
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
self.assertFalse(self.logger.hasHandlers())
def test_has_handlers_no_propagate(self):
child_logger = logging.getLogger('blah.child')
child_logger.propagate = False
self.assertFalse(child_logger.hasHandlers())
def test_is_enabled_for(self):
old_disable = self.logger.manager.disable
self.logger.manager.disable = 23
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_is_enabled_for_disabled_logger(self):
old_disabled = self.logger.disabled
old_disable = self.logger.manager.disable
self.logger.disabled = True
self.logger.manager.disable = 21
self.addCleanup(setattr, self.logger, 'disabled', old_disabled)
self.addCleanup(setattr, self.logger.manager, 'disable', old_disable)
self.assertFalse(self.logger.isEnabledFor(22))
def test_root_logger_aliases(self):
root = logging.getLogger()
self.assertIs(root, logging.root)
self.assertIs(root, logging.getLogger(None))
self.assertIs(root, logging.getLogger(''))
self.assertIs(root, logging.getLogger('foo').root)
self.assertIs(root, logging.getLogger('foo.bar').root)
self.assertIs(root, logging.getLogger('foo').parent)
self.assertIsNot(root, logging.getLogger('\0'))
self.assertIsNot(root, logging.getLogger('foo.bar').parent)
def test_invalid_names(self):
self.assertRaises(TypeError, logging.getLogger, any)
self.assertRaises(TypeError, logging.getLogger, b'foo')
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for name in ('', 'root', 'foo', 'foo.bar', 'baz.bar'):
logger = logging.getLogger(name)
s = pickle.dumps(logger, proto)
unpickled = pickle.loads(s)
self.assertIs(unpickled, logger)
def test_caching(self):
root = self.root_logger
logger1 = logging.getLogger("abc")
logger2 = logging.getLogger("abc.def")
# Set root logger level and ensure cache is empty
root.setLevel(logging.ERROR)
self.assertEqual(logger2.getEffectiveLevel(), logging.ERROR)
self.assertEqual(logger2._cache, {})
# Ensure cache is populated and calls are consistent
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
self.assertFalse(logger2.isEnabledFor(logging.DEBUG))
self.assertEqual(logger2._cache, {logging.ERROR: True, logging.DEBUG: False})
self.assertEqual(root._cache, {})
self.assertTrue(logger2.isEnabledFor(logging.ERROR))
# Ensure root cache gets populated
self.assertEqual(root._cache, {})
self.assertTrue(root.isEnabledFor(logging.ERROR))
self.assertEqual(root._cache, {logging.ERROR: True})
# Set parent logger level and ensure caches are emptied
logger1.setLevel(logging.CRITICAL)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
# Ensure logger2 uses parent logger's effective level
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
# Set level to NOTSET and ensure caches are empty
logger2.setLevel(logging.NOTSET)
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Verify logger2 follows parent and not root
self.assertFalse(logger2.isEnabledFor(logging.ERROR))
self.assertTrue(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger1.isEnabledFor(logging.ERROR))
self.assertTrue(logger1.isEnabledFor(logging.CRITICAL))
self.assertTrue(root.isEnabledFor(logging.ERROR))
# Disable logging in manager and ensure caches are clear
logging.disable()
self.assertEqual(logger2.getEffectiveLevel(), logging.CRITICAL)
self.assertEqual(logger2._cache, {})
self.assertEqual(logger1._cache, {})
self.assertEqual(root._cache, {})
# Ensure no loggers are enabled
self.assertFalse(logger1.isEnabledFor(logging.CRITICAL))
self.assertFalse(logger2.isEnabledFor(logging.CRITICAL))
self.assertFalse(root.isEnabledFor(logging.CRITICAL))
class BaseFileTest(BaseTest):
"Base class for handler tests that write log files"
def setUp(self):
BaseTest.setUp(self)
fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-")
os.close(fd)
self.rmfiles = []
def tearDown(self):
for fn in self.rmfiles:
os.unlink(fn)
if os.path.exists(self.fn):
os.unlink(self.fn)
BaseTest.tearDown(self)
def assertLogFile(self, filename):
"Assert a log file is there and register it for deletion"
self.assertTrue(os.path.exists(filename),
msg="Log file %r does not exist" % filename)
self.rmfiles.append(filename)
class FileHandlerTest(BaseFileTest):
def test_delay(self):
os.unlink(self.fn)
fh = logging.FileHandler(self.fn, delay=True)
self.assertIsNone(fh.stream)
self.assertFalse(os.path.exists(self.fn))
fh.handle(logging.makeLogRecord({}))
self.assertIsNotNone(fh.stream)
self.assertTrue(os.path.exists(self.fn))
fh.close()
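# On rollover, RotatingFileHandler renames the current file to <name>.1,
# <name>.2, ... up to backupCount; the namer and rotator attributes exercised
# below let tests (and users) customize the rotated names and the rotation
# step itself.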
class RotatingFileHandlerTest(BaseFileTest):
def next_rec(self):
return logging.LogRecord('n', logging.DEBUG, 'p', 1,
self.next_message(), None, None, None)
def test_should_not_rollover(self):
# If maxbytes is zero rollover never occurs
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0)
self.assertFalse(rh.shouldRollover(None))
rh.close()
def test_should_rollover(self):
rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1)
self.assertTrue(rh.shouldRollover(self.next_rec()))
rh.close()
def test_file_created(self):
# checks that the file is created and assumes it was created
# by us
rh = logging.handlers.RotatingFileHandler(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.close()
def test_rollover_filenames(self):
def namer(name):
return name + ".test"
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.namer = namer
rh.emit(self.next_rec())
self.assertLogFile(self.fn)
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".1"))
rh.emit(self.next_rec())
self.assertLogFile(namer(self.fn + ".2"))
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
@support.requires_zlib
def test_rotator(self):
def namer(name):
return name + ".gz"
def rotator(source, dest):
with open(source, "rb") as sf:
data = sf.read()
compressed = zlib.compress(data, 9)
with open(dest, "wb") as df:
df.write(compressed)
os.remove(source)
rh = logging.handlers.RotatingFileHandler(
self.fn, backupCount=2, maxBytes=1)
rh.rotator = rotator
rh.namer = namer
m1 = self.next_rec()
rh.emit(m1)
self.assertLogFile(self.fn)
m2 = self.next_rec()
rh.emit(m2)
fn = namer(self.fn + ".1")
self.assertLogFile(fn)
newline = os.linesep
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
self.assertLogFile(fn)
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m1.msg + newline)
rh.emit(self.next_rec())
fn = namer(self.fn + ".2")
with open(fn, "rb") as f:
compressed = f.read()
data = zlib.decompress(compressed)
self.assertEqual(data.decode("ascii"), m2.msg + newline)
self.assertFalse(os.path.exists(namer(self.fn + ".3")))
rh.close()
class TimedRotatingFileHandlerTest(BaseFileTest):
# other test methods added below
def test_rollover(self):
fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S',
backupCount=1)
fmt = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fmt)
r1 = logging.makeLogRecord({'msg': 'testing - initial'})
fh.emit(r1)
self.assertLogFile(self.fn)
time.sleep(1.1) # a little over a second ...
r2 = logging.makeLogRecord({'msg': 'testing - after delay'})
fh.emit(r2)
fh.close()
# At this point, we should have a recent rotated file which we
# can test for the existence of. However, in practice, on some
# machines which run really slowly, we don't know how far back
# in time to go to look for the log file. So, we go back a fair
# bit, and stop as soon as we see a rotated file. In theory this
# could of course still fail, but the chances are lower.
found = False
now = datetime.datetime.now()
GO_BACK = 5 * 60 # seconds
for secs in range(GO_BACK):
prev = now - datetime.timedelta(seconds=secs)
fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S")
found = os.path.exists(fn)
if found:
self.rmfiles.append(fn)
break
msg = 'No rotated files found, went back %d seconds' % GO_BACK
if not found:
# print additional diagnostics
dn, fn = os.path.split(self.fn)
files = [f for f in os.listdir(dn) if f.startswith(fn)]
print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr)
print('The only matching files are: %s' % files, file=sys.stderr)
for f in files:
print('Contents of %s:' % f)
path = os.path.join(dn, f)
with open(path, 'r') as tf:
print(tf.read())
self.assertTrue(found, msg=msg)
def test_invalid(self):
assertRaises = self.assertRaises
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'X', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W', delay=True)
assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler,
self.fn, 'W7', delay=True)
def test_compute_rollover_daily_attime(self):
currentTime = 0
atTime = datetime.time(12, 0, 0)
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
actual = rh.computeRollover(currentTime)
self.assertEqual(actual, currentTime + 12 * 60 * 60)
actual = rh.computeRollover(currentTime + 13 * 60 * 60)
self.assertEqual(actual, currentTime + 36 * 60 * 60)
finally:
rh.close()
#@unittest.skipIf(True, 'Temporarily skipped while failures investigated.')
def test_compute_rollover_weekly_attime(self):
currentTime = int(time.time())
today = currentTime - currentTime % 86400
atTime = datetime.time(12, 0, 0)
wday = time.gmtime(today).tm_wday
for day in range(7):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when='W%d' % day, interval=1, backupCount=0, utc=True,
atTime=atTime)
try:
if wday > day:
# The rollover day has already passed this week, so we
# go over into next week
expected = (7 - wday + day)
else:
expected = (day - wday)
# At this point expected is in days from now, convert to seconds
expected *= 24 * 60 * 60
# Add in the rollover time
expected += 12 * 60 * 60
# Add in adjustment for today
expected += today
actual = rh.computeRollover(today)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
if day == wday:
# goes into following week
expected += 7 * 24 * 60 * 60
actual = rh.computeRollover(today + 13 * 60 * 60)
if actual != expected:
print('failed in timezone: %d' % time.timezone)
print('local vars: %s' % locals())
self.assertEqual(actual, expected)
finally:
rh.close()
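# The loop below manufactures one test_compute_rollover_<when> method per
# rollover spec and attaches each to TimedRotatingFileHandlerTest via setattr.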
def secs(**kw):
return datetime.timedelta(**kw) // datetime.timedelta(seconds=1)
for when, exp in (('S', 1),
('M', 60),
('H', 60 * 60),
('D', 60 * 60 * 24),
('MIDNIGHT', 60 * 60 * 24),
# current time (epoch start) is a Thursday, W0 means Monday
('W0', secs(days=4, hours=24)),
):
def test_compute_rollover(self, when=when, exp=exp):
rh = logging.handlers.TimedRotatingFileHandler(
self.fn, when=when, interval=1, backupCount=0, utc=True)
currentTime = 0.0
actual = rh.computeRollover(currentTime)
if exp != actual:
# Failures occur on some systems for MIDNIGHT and W0.
# Print detailed calculation for MIDNIGHT so we can try to see
# what's going on
if when == 'MIDNIGHT':
try:
if rh.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = logging.handlers._MIDNIGHT - ((currentHour * 60 +
currentMinute) * 60 +
currentSecond)
result = currentTime + r
print('t: %s (%s)' % (t, rh.utc), file=sys.stderr)
print('currentHour: %s' % currentHour, file=sys.stderr)
print('currentMinute: %s' % currentMinute, file=sys.stderr)
print('currentSecond: %s' % currentSecond, file=sys.stderr)
print('r: %s' % r, file=sys.stderr)
print('result: %s' % result, file=sys.stderr)
except Exception:
print('exception in diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr)
self.assertEqual(exp, actual)
rh.close()
setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover)
@unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil/pywintypes required for this test.')
class NTEventLogHandlerTest(BaseTest):
def test_basic(self):
logtype = 'Application'
elh = win32evtlog.OpenEventLog(None, logtype)
num_recs = win32evtlog.GetNumberOfEventLogRecords(elh)
try:
h = logging.handlers.NTEventLogHandler('test_logging')
except pywintypes.error as e:
if e.winerror == 5: # access denied
raise unittest.SkipTest('Insufficient privileges to run test')
raise
r = logging.makeLogRecord({'msg': 'Test Log Message'})
h.handle(r)
h.close()
# Now see if the event is recorded
self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh))
flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \
win32evtlog.EVENTLOG_SEQUENTIAL_READ
found = False
GO_BACK = 100
events = win32evtlog.ReadEventLog(elh, flags, GO_BACK)
for e in events:
if e.SourceName != 'test_logging':
continue
msg = win32evtlogutil.SafeFormatMessage(e, logtype)
if msg != 'Test Log Message\r\n':
continue
found = True
break
msg = 'Record not found in event log, went back %d records' % GO_BACK
self.assertTrue(found, msg=msg)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
blacklist = {'logThreads', 'logMultiprocessing',
'logProcesses', 'currentframe',
'PercentStyle', 'StrFormatStyle', 'StringTemplateStyle',
'Filterer', 'PlaceHolder', 'Manager', 'RootLogger',
'root', 'threading'}
support.check__all__(self, logging, blacklist=blacklist)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@support.run_with_locale('LC_ALL', '')
def test_main():
tests = [
BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest,
HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest,
DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest,
ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest,
StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest,
QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest,
LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest,
RotatingFileHandlerTest, LastResortTest, LogRecordTest,
ExceptionTest, SysLogHandlerTest, IPv6SysLogHandlerTest, HTTPHandlerTest,
NTEventLogHandlerTest, TimedRotatingFileHandlerTest,
UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest,
MiscTestCase
]
if hasattr(logging.handlers, 'QueueListener'):
tests.append(QueueListenerTest)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main() | import unittest |
general.rs | use super::*;
use std::collections::HashSet;
pub trait PassiveGeneralScan{
fn check_server_url(&self)->Vec<Alert>;
fn check_additional_properties(&self)->Vec<Alert>;
fn check_successes(&self)->Vec<Alert>;
fn check_default_response(&self)->Vec<Alert>;
fn check_response_body_schema(&self)->Vec<Alert>;
fn example_inconsistant_schema(&self)->Vec<Alert>;
fn check_default_type(&self)->Vec<Alert>;
fn check_enum_type(&self)->Vec<Alert>;
fn check_required_undefined(&self)->Vec<Alert>;
fn check_unused_schema(&self)->Vec<Alert>;
}
///Rule functions implementation
impl PassiveGeneralScan for PassiveSwaggerScan{
///Can raise no https alert and invalid url in server alert
fn check_server_url(&self)->Vec<Alert>{
let mut alerts = vec![];
let mut server_addrs = HashSet::new();
if let Some(servers) = &self.swagger.servers{
alerts.extend(check_servers_for_server_url_rule(servers,"swagger root servers",&mut server_addrs));
}
for (path,item) in &self.swagger.paths{
for (m,op) in item.get_ops(){
if let Some(servers) = &op.servers{
alerts.extend(check_servers_for_server_url_rule(servers,&format!("swagger {} {} servers",path,m),&mut server_addrs));
}
}
}
//println!("{:?}",alerts);
alerts
}
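    ///Raises a low-severity alert for operations whose responses include no 2XX success status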
fn check_successes(&self)->Vec<Alert>{
let mut alerts = vec![];
for (path,item) in &self.swagger.paths {
for (m,op) in item.get_ops(){
let statuses = op.responses().iter().map(|(k,_v)| k.clone()).collect::<Vec<String>>();
let mut found = false;
for status in statuses{
if let Ok(s) = status.parse::<u16>(){
if (200..300).contains(&s){
found = true;
break;
}
}
}
if !found{
alerts.push(Alert::new(Level::Low,"Responses have no success status(2XX)",format!("swagger path:{} operation:{}",path,m)));
}
}
}
alerts
}
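    ///Runs the additional-properties check on every schema under the spec's root components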
fn check_additional_properties(&self)->Vec<Alert>{
        let mut alerts = vec![];
        if let Some(comps) = &self.swagger.components{
            if let Some(schemas) = &comps.schemas{
                for (name,schema) in schemas{
alerts.extend(additional_properties_test(&schema.inner(&self.swagger_value),format!("swagger root components schema:{}",name)))
}
}
}
alerts
}
fn check_default_response(&self)->Vec<Alert>{
let mut alerts = vec![];
let message = "No default response defined";
for (responses,location) in get_responses(&self.swagger){
if responses.get("default").is_none(){
alerts.push(Alert::new(Level::Low,message,location));
}
}
//println!("{:?}",alerts);
alerts
}
fn check_response_body_schema(&self)->Vec<Alert>{
let mut alerts = vec![];
let message = "Response body doesn't have a schema";
for (responses,location) in get_responses(&self.swagger){
for (status,response) in responses{
if let Some(content) = response.inner(&self.swagger_value).content{
for (name,m_t) in content{
if m_t.schema.is_none(){
alerts.push(Alert::new(Level::Medium,message,format!("{} status:{} media type:{}",location,status,name)));
}
}
}
}
}
alerts
}
fn example_inconsistant_schema(&self)->Vec<Alert>{
vec![]
}
fn check_default_type(&self)->Vec<Alert>{
let mut alerts = vec![];
for (param,loc) in get_params(&self.swagger,&self.swagger_value){
alerts.extend(param_default_rec(¶m,loc));
}
alerts
}
fn check_enum_type(&self)->Vec<Alert>{
let mut alerts = vec![];
for (param,loc) in get_params(&self.swagger,&self.swagger_value){
alerts.extend(param_enum_rec(¶m,loc));
}
alerts
}
fn check_required_undefined(&self)->Vec<Alert>{
vec![]
}
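    ///Serializes the spec and flags component schemas whose "#/components/schemas/<name>" path is never referenced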
fn check_unused_schema(&self)->Vec<Alert>{
let mut alerts = vec![];
let swagger_str = serde_json::to_string(&self.swagger).unwrap();
if let Some(comps) = &self.swagger.components{
if let Some(schemas) = &comps.schemas{
for name in schemas.keys(){
let schema_path = format!("#/components/schemas/{}",name);
if !swagger_str.contains(&schema_path){
alerts.push(Alert::new(Level::Info,"Schema is defined but never used",format!("swagger root components schema:{}",name)));
}
}
}
}
alerts
}
}
main.go | package main
import (
"encoding/json"
"fmt"
log "github.com/sirupsen/logrus"
"io/ioutil"
"time"
)
var iterations int
func main() {
// Start out at position -1 because we want the first piece added to be at 0
b := board{CurrentPosition: -1}
dataPath := "./data.json"
jsonFile, err := ioutil.ReadFile(dataPath)
if err != nil {
log.Fatalf("Error opening %s", dataPath)
}
err = json.Unmarshal(jsonFile, &b.Squares)
if err != nil {
log.Fatalf("Error unmarshalling %s", dataPath)
}
start := time.Now()
b.solve()
b.print()
duration := time.Since(start)
fmt.Printf("Solution took %dms and %d iterations\n", duration.Milliseconds(), iterations)
}
unique_paths_II.go | package unique_paths_II
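// Dynamic programming over the grid: matrix[i][j] holds the number of paths
// from (0,0) to (i,j) moving only right or down. Obstacle cells stay zero,
// edge cells inherit from their single neighbour, and interior cells sum the
// counts from above and from the left.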
func uniquePathsWithObstacles(obstacleGrid [][]int) int {
if len(obstacleGrid) == 0 {
return 0
}
m, n := len(obstacleGrid)-1, len(obstacleGrid[0])-1
if obstacleGrid[m][n] == 1 {
return 0
}
matrix := make([][]int, m+1)
for i := range matrix {
matrix[i] = make([]int, n+1)
}
for i := 0; i <= m; i++ {
for j := 0; j <= n; j++ {
if obstacleGrid[i][j] == 1 {
continue
}
if i == 0 && j == 0 {
matrix[i][j] = 1 - obstacleGrid[0][0]
} else if i == 0 {
matrix[i][j] = matrix[i][j-1]
} else if j == 0 {
matrix[i][j] = matrix[i-1][j]
} else {
matrix[i][j] = matrix[i-1][j] + matrix[i][j-1]
} | }
return matrix[m][n]
} | } |
between.go | package between
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strconv"
"text/template"
"github.com/mxmCherry/openrtb/v15/openrtb2"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/errortypes"
"github.com/prebid/prebid-server/macros"
"github.com/prebid/prebid-server/openrtb_ext"
)
type BetweenAdapter struct {
EndpointTemplate template.Template
}
func (a *BetweenAdapter) MakeRequests(request *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) ([]*adapters.RequestData, []error) {
var errors []error
if len(request.Imp) == 0 {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("No valid Imps in Bid Request"),
}}
}
ext, errors := preprocess(request)
	if len(errors) > 0 {
return nil, errors
}
endpoint, err := a.buildEndpointURL(ext)
if err != nil {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Failed to build endpoint URL: %s", err),
}}
}
data, err := json.Marshal(request)
if err != nil {
return nil, []error{&errortypes.BadInput{
Message: fmt.Sprintf("Error in packaging request to JSON"),
}}
}
headers := http.Header{}
headers.Add("Content-Type", "application/json;charset=utf-8")
headers.Add("Accept", "application/json")
if request.Device != nil {
addHeaderIfNonEmpty(headers, "User-Agent", request.Device.UA)
addHeaderIfNonEmpty(headers, "X-Forwarded-For", request.Device.IP)
addHeaderIfNonEmpty(headers, "Accept-Language", request.Device.Language)
if request.Device.DNT != nil {
addHeaderIfNonEmpty(headers, "DNT", strconv.Itoa(int(*request.Device.DNT)))
}
}
if request.Site != nil {
addHeaderIfNonEmpty(headers, "Referer", request.Site.Page)
}
return []*adapters.RequestData{{
Method: "POST",
Uri: endpoint,
Body: data,
Headers: headers,
}}, errors
}
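// unpackImpExt peels the two JSON layers of imp.ext: the generic prebid
// bidder wrapper first, then the Between-specific parameters nested inside.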
func unpackImpExt(imp *openrtb2.Imp) (*openrtb_ext.ExtImpBetween, error) {
var bidderExt adapters.ExtImpBidder
if err := json.Unmarshal(imp.Ext, &bidderExt); err != nil {
return nil, &errortypes.BadInput{
Message: fmt.Sprintf("ignoring imp id=%s, invalid BidderExt", imp.ID),
}
}
var betweenExt openrtb_ext.ExtImpBetween
if err := json.Unmarshal(bidderExt.Bidder, &betweenExt); err != nil {
return nil, &errortypes.BadInput{
Message: fmt.Sprintf("ignoring imp id=%s, invalid ImpExt", imp.ID),
}
}
return &betweenExt, nil
}
func (a *BetweenAdapter) buildEndpointURL(e *openrtb_ext.ExtImpBetween) (string, error) {
missingRequiredParameterMessage := "required BetweenSSP parameter \"%s\" is missing"
if e.Host == "" {
return "", &errortypes.BadInput{
Message: fmt.Sprintf(missingRequiredParameterMessage, "host"),
}
}
if e.PublisherID == "" {
return "", &errortypes.BadInput{
Message: fmt.Sprintf(missingRequiredParameterMessage, "publisher_id"),
}
}
return macros.ResolveMacros(a.EndpointTemplate, macros.EndpointTemplateParams{Host: e.Host, PublisherID: e.PublisherID})
}
func buildImpBanner(imp *openrtb2.Imp) error {
if imp.Banner == nil {
return &errortypes.BadInput{
Message: fmt.Sprintf("Request needs to include a Banner object"),
}
}
banner := *imp.Banner
if banner.W == nil && banner.H == nil {
if len(banner.Format) == 0 {
return &errortypes.BadInput{
Message: fmt.Sprintf("Need at least one size to build request"),
}
}
format := banner.Format[0]
banner.Format = banner.Format[1:]
banner.W = &format.W
banner.H = &format.H
imp.Banner = &banner
}
return nil
}
// Add Between required properties to Imp object
func addImpProps(imp *openrtb2.Imp, secure *int8, betweenExt *openrtb_ext.ExtImpBetween) {
imp.Secure = secure
}
// Adding header fields to request header
func addHeaderIfNonEmpty(headers http.Header, headerName string, headerValue string) {
if len(headerValue) > 0 {
headers.Add(headerName, headerValue)
}
}
// Handle request errors and formatting to be sent to Between
func preprocess(request *openrtb2.BidRequest) (*openrtb_ext.ExtImpBetween, []error) {
errors := make([]error, 0, len(request.Imp))
resImps := make([]openrtb2.Imp, 0, len(request.Imp))
secure := int8(0)
if request.Site != nil && request.Site.Page != "" {
pageURL, err := url.Parse(request.Site.Page)
if err == nil && pageURL.Scheme == "https" {
secure = int8(1)
}
}
var betweenExt *openrtb_ext.ExtImpBetween
for _, imp := range request.Imp {
var err error
betweenExt, err = unpackImpExt(&imp)
if err != nil {
errors = append(errors, err)
continue
}
addImpProps(&imp, &secure, betweenExt)
if err := buildImpBanner(&imp); err != nil {
errors = append(errors, err)
continue
}
resImps = append(resImps, imp)
}
request.Imp = resImps
return betweenExt, errors
}
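// MakeBids passes the seat bids straight through; this adapter handles banner
// impressions only, so every bid is typed as BidTypeBanner.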
func (a *BetweenAdapter) MakeBids(internalRequest *openrtb2.BidRequest, externalRequest *adapters.RequestData, response *adapters.ResponseData) (*adapters.BidderResponse, []error) {
if response.StatusCode == http.StatusNoContent {
// no bid response
return nil, nil
}
if response.StatusCode != http.StatusOK {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Invalid Status Returned: %d. Run with request.debug = 1 for more info", response.StatusCode),
}}
}
var bidResp openrtb2.BidResponse
if err := json.Unmarshal(response.Body, &bidResp); err != nil {
return nil, []error{&errortypes.BadServerResponse{
Message: fmt.Sprintf("Unable to unpackage bid response. Error %s", err.Error()),
}}
}
bidResponse := adapters.NewBidderResponseWithBidsCapacity(1)
for _, sb := range bidResp.SeatBid {
for i := range sb.Bid {
bidResponse.Bids = append(bidResponse.Bids, &adapters.TypedBid{
Bid: &sb.Bid[i],
BidType: openrtb_ext.BidTypeBanner,
})
}
}
return bidResponse, nil
}
// Builder builds a new instance of the Between adapter for the given bidder with the given config.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter) (adapters.Bidder, error) {
template, err := template.New("endpointTemplate").Parse(config.Endpoint)
if err != nil {
return nil, fmt.Errorf("unable to parse endpoint url template: %v", err)
}
bidder := BetweenAdapter{
EndpointTemplate: *template,
}
return &bidder, nil
}
daemon.py | # Copyright (c) 2010, 2011, 2012 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import fcntl
import os
import resource
import signal
import sys
import time
import ovs.dirs
import ovs.fatal_signal
#import ovs.lockfile
import ovs.process
import ovs.socket_util
import ovs.timeval
import ovs.util
import ovs.vlog
vlog = ovs.vlog.Vlog("daemon")
# --detach: Should we run in the background?
_detach = False
# --pidfile: Name of pidfile (null if none).
_pidfile = None
# Our pidfile's inode and device, if we have created one.
_pidfile_dev = None
_pidfile_ino = None
# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
_overwrite_pidfile = False
# --no-chdir: Should we chdir to "/"?
_chdir = True
# --monitor: Should a supervisory process monitor the daemon and restart it if
# it dies due to an error signal?
_monitor = False
# File descriptor used by daemonize_start() and daemonize_complete().
_daemonize_fd = None
RESTART_EXIT_CODE = 5
def make_pidfile_name(name):
"""Returns the file name that would be used for a pidfile if 'name' were
provided to set_pidfile()."""
if name is None or name == "":
return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
else:
return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
def set_pidfile(name):
"""Sets up a following call to daemonize() to create a pidfile named
'name'. If 'name' begins with '/', then it is treated as an absolute path.
Otherwise, it is taken relative to ovs.util.RUNDIR, which is
$(prefix)/var/run by default.
If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is
used."""
global _pidfile
_pidfile = make_pidfile_name(name)
def get_pidfile():
"""Returns an absolute path to the configured pidfile, or None if no
pidfile is configured."""
return _pidfile
def set_no_chdir():
"""Sets that we do not chdir to "/"."""
global _chdir
_chdir = False
def is_chdir_enabled():
"""Will we chdir to "/" as part of daemonizing?"""
return _chdir
def ignore_existing_pidfile():
"""Normally, daemonize() or daemonize_start() will terminate the program
with a message if a locked pidfile already exists. If this function is
called, an existing pidfile will be replaced, with a warning."""
global _overwrite_pidfile
_overwrite_pidfile = True
def set_detach():
"""Sets up a following call to daemonize() to detach from the foreground
session, running this process in the background."""
global _detach
_detach = True
def get_detach():
"""Will daemonize() really detach?"""
return _detach
def set_monitor():
"""Sets up a following call to daemonize() to fork a supervisory process to
monitor the daemon and restart it if it dies due to an error signal."""
global _monitor
_monitor = True
def _fatal(msg):
vlog.err(msg)
sys.stderr.write("%s\n" % msg)
sys.exit(1)
def _make_pidfile():
"""If a pidfile has been configured, creates it and stores the running
process's pid in it. Ensures that the pidfile will be deleted when the
process exits."""
pid = os.getpid()
# Create a temporary pidfile.
tmpfile = "%s.tmp%d" % (_pidfile, pid)
ovs.fatal_signal.add_file_to_unlink(tmpfile)
try:
# This is global to keep Python from garbage-collecting and
# therefore closing our file after this function exits. That would
# unlock the lock for us, and we don't want that.
global file_handle
file_handle = open(tmpfile, "w")
except IOError, e:
_fatal("%s: create failed (%s)" % (tmpfile, e.strerror))
try:
s = os.fstat(file_handle.fileno())
except IOError, e:
_fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))
try:
file_handle.write("%s\n" % pid)
file_handle.flush()
except OSError, e:
_fatal("%s: write failed: %s" % (tmpfile, e.strerror))
try:
fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
_fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))
# Rename or link it to the correct name.
if _overwrite_pidfile:
try:
os.rename(tmpfile, _pidfile)
except OSError, e:
_fatal("failed to rename \"%s\" to \"%s\" (%s)"
% (tmpfile, _pidfile, e.strerror))
else:
while True:
try:
os.link(tmpfile, _pidfile)
error = 0
except OSError, e:
error = e.errno
if error == errno.EEXIST:
_check_already_running()
elif error != errno.EINTR:
break
if error:
_fatal("failed to link \"%s\" as \"%s\" (%s)"
% (tmpfile, _pidfile, os.strerror(error)))
# Ensure that the pidfile will get deleted on exit.
ovs.fatal_signal.add_file_to_unlink(_pidfile)
# Delete the temporary pidfile if it still exists.
if not _overwrite_pidfile:
error = ovs.fatal_signal.unlink_file_now(tmpfile)
if error:
_fatal("%s: unlink failed (%s)" % (tmpfile, os.strerror(error)))
global _pidfile_dev
global _pidfile_ino
_pidfile_dev = s.st_dev
_pidfile_ino = s.st_ino
def daemonize():
"""If configured with set_pidfile() or set_detach(), creates the pid file
and detaches from the foreground session."""
daemonize_start()
daemonize_complete()
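# Illustrative usage sketch (editor's addition; "my-daemon" is a made-up name,
# and the import path assumes this module is installed as ovs.daemon):
#
#   import ovs.daemon
#   ovs.daemon.set_pidfile("my-daemon")  # pidfile under RUNDIR
#   ovs.daemon.set_detach()              # run in the background
#   ovs.daemon.daemonize()               # fork, write pidfile, detach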
def _waitpid(pid, options):
while True:
try:
return os.waitpid(pid, options)
except OSError, e:
if e.errno == errno.EINTR:
continue
return -e.errno, 0
def _fork_and_wait_for_startup():
try:
rfd, wfd = os.pipe()
except OSError, e:
sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
sys.exit(1)
try:
pid = os.fork()
except OSError, e:
sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
sys.exit(1)
if pid > 0:
# Running in parent process.
os.close(wfd)
ovs.fatal_signal.fork()
while True:
try:
s = os.read(rfd, 1)
error = 0
except OSError, e:
s = ""
error = e.errno
if error != errno.EINTR:
break
if len(s) != 1:
retval, status = _waitpid(pid, 0)
if retval == pid:
if os.WIFEXITED(status) and os.WEXITSTATUS(status):
# Child exited with an error. Convey the same error to
# our parent process as a courtesy.
sys.exit(os.WEXITSTATUS(status))
else:
sys.stderr.write("fork child failed to signal "
"startup (%s)\n"
% ovs.process.status_msg(status))
else:
assert retval < 0
sys.stderr.write("waitpid failed (%s)\n"
% os.strerror(-retval))
sys.exit(1)
os.close(rfd)
else:
# Running in child process.
os.close(rfd)
ovs.timeval.postfork()
#ovs.lockfile.postfork()
global _daemonize_fd
_daemonize_fd = wfd
return pid
def _fork_notify_startup(fd):
if fd is not None:
error, bytes_written = ovs.socket_util.write_fully(fd, "0")
if error:
sys.stderr.write("could not write to pipe\n")
sys.exit(1)
os.close(fd)
def _should_restart(status):
global RESTART_EXIT_CODE
if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
return True
if os.WIFSIGNALED(status):
for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
"SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"):
if os.WTERMSIG(status) == getattr(signal, signame, None):
return True
return False
def _monitor_daemon(daemon_pid):
# XXX should log daemon's stderr output at startup time
# XXX should use setproctitle module if available
last_restart = None
while True:
retval, status = _waitpid(daemon_pid, 0)
if retval < 0:
sys.stderr.write("waitpid failed\n")
sys.exit(1)
elif retval == daemon_pid:
status_msg = ("pid %d died, %s"
% (daemon_pid, ovs.process.status_msg(status)))
if _should_restart(status):
if os.WCOREDUMP(status):
# Disable further core dumps to save disk space.
try:
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
except resource.error:
vlog.warn("failed to disable core dumps")
# Throttle restarts to no more than once every 10 seconds.
if (last_restart is not None and
ovs.timeval.msec() < last_restart + 10000):
vlog.warn("%s, waiting until 10 seconds since last "
"restart" % status_msg)
while True:
now = ovs.timeval.msec()
wakeup = last_restart + 10000
if now > wakeup:
break
print "sleep %f" % ((wakeup - now) / 1000.0)
time.sleep((wakeup - now) / 1000.0)
last_restart = ovs.timeval.msec()
vlog.err("%s, restarting" % status_msg)
daemon_pid = _fork_and_wait_for_startup()
if not daemon_pid:
break
else:
vlog.info("%s, exiting" % status_msg)
sys.exit(0)
# Running in new daemon process.
def _close_standard_fds():
"""Close stdin, stdout, stderr. If we're started from e.g. an SSH session,
then this keeps us from holding that session open artificially."""
null_fd = ovs.socket_util.get_null_fd()
if null_fd >= 0:
os.dup2(null_fd, 0)
os.dup2(null_fd, 1)
os.dup2(null_fd, 2)
def daemonize_start():
"""If daemonization is configured, then starts daemonization, by forking
and returning in the child process. The parent process hangs around until
the child lets it know either that it completed startup successfully (by
calling daemon_complete()) or that it failed to start up (by exiting with a
nonzero exit code)."""
if _detach:
if _fork_and_wait_for_startup() > 0:
# Running in parent process.
sys.exit(0)
# Running in daemon or monitor process.
os.setsid()
if _monitor:
saved_daemonize_fd = _daemonize_fd
daemon_pid = _fork_and_wait_for_startup()
if daemon_pid > 0:
# Running in monitor process.
_fork_notify_startup(saved_daemonize_fd)
_close_standard_fds()
_monitor_daemon(daemon_pid)
# Running in daemon process
if _pidfile:
_make_pidfile()
def daemonize_complete():
"""If daemonization is configured, then this function notifies the parent
process that the child process has completed startup successfully."""
_fork_notify_startup(_daemonize_fd)
if _detach:
if _chdir:
os.chdir("/")
_close_standard_fds()
def usage():
sys.stdout.write("""
Daemon options:
--detach run in background as daemon
--no-chdir do not chdir to '/'
--pidfile[=FILE] create pidfile (default: %s/%s.pid)
--overwrite-pidfile with --pidfile, start even if already running
""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
def __read_pidfile(pidfile, delete_if_stale):
if _pidfile_dev is not None:
try:
s = os.stat(pidfile)
if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
# It's our own pidfile. We can't afford to open it,
# because closing *any* fd for a file that a process
# has locked also releases all the locks on that file.
#
# Fortunately, we know the associated pid anyhow.
return os.getpid()
except OSError:
pass
try:
file_handle = open(pidfile, "r+")
except IOError, e:
if e.errno == errno.ENOENT and delete_if_stale:
return 0
vlog.warn("%s: open: %s" % (pidfile, e.strerror))
return -e.errno
# Python fcntl doesn't directly support F_GETLK so we have to just try
# to lock it.
try:
fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
# pidfile exists but wasn't locked by anyone. Now we have the lock.
if not delete_if_stale:
file_handle.close()
vlog.warn("%s: pid file is stale" % pidfile)
return -errno.ESRCH
# Is the file we have locked still named 'pidfile'?
try:
raced = False
s = os.stat(pidfile)
s2 = os.fstat(file_handle.fileno())
if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
raced = True
except IOError:
raced = True
if raced:
vlog.warn("%s: lost race to delete pidfile" % pidfile)
return -errno.EALREADY
# We won the right to delete the stale pidfile.
try:
os.unlink(pidfile)
except IOError, e:
vlog.warn("%s: failed to delete stale pidfile (%s)"
% (pidfile, e.strerror))
return -e.errno
else:
vlog.dbg("%s: deleted stale pidfile" % pidfile)
file_handle.close()
return 0
except IOError, e:
if e.errno not in [errno.EACCES, errno.EAGAIN]: | # Someone else has the pidfile locked.
try:
try:
error = int(file_handle.readline())
except IOError, e:
vlog.warn("%s: read: %s" % (pidfile, e.strerror))
error = -e.errno
except ValueError:
vlog.warn("%s does not contain a pid" % pidfile)
error = -errno.EINVAL
return error
finally:
try:
file_handle.close()
except IOError:
pass
def read_pidfile(pidfile):
"""Opens and reads a PID from 'pidfile'. Returns the positive PID if
successful, otherwise a negative errno value."""
return __read_pidfile(pidfile, False)
def _check_already_running():
pid = __read_pidfile(_pidfile, True)
if pid > 0:
_fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
elif pid < 0:
_fatal("%s: pidfile check failed (%s), aborting"
% (_pidfile, os.strerror(pid)))
def add_args(parser):
"""Populates 'parser', an ArgumentParser allocated using the argparse
module, with the command line arguments required by the daemon module."""
pidfile = make_pidfile_name(None)
group = parser.add_argument_group(title="Daemon Options")
group.add_argument("--detach", action="store_true",
help="Run in background as a daemon.")
group.add_argument("--no-chdir", action="store_true",
help="Do not chdir to '/'.")
group.add_argument("--monitor", action="store_true",
help="Monitor %s process." % ovs.util.PROGRAM_NAME)
group.add_argument("--pidfile", nargs="?", const=pidfile,
help="Create pidfile (default %s)." % pidfile)
group.add_argument("--overwrite-pidfile", action="store_true",
help="With --pidfile, start even if already running.")
def handle_args(args):
"""Handles daemon module settings in 'args'. 'args' is an object
containing values parsed by the parse_args() method of ArgumentParser. The
parent ArgumentParser should have been prepared by add_args() before
calling parse_args()."""
if args.detach:
set_detach()
if args.no_chdir:
set_no_chdir()
if args.pidfile:
set_pidfile(args.pidfile)
if args.overwrite_pidfile:
ignore_existing_pidfile()
if args.monitor:
set_monitor() | vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
return -e.errno
|
CurriculumVitae.js | import React from 'react';
import { useSelector } from 'react-redux';
import { selectTheme } from '../../redux/reducers/theme';
import styles from './CurriculumVitae.module.css';
export default function | () {
const isDarkMode = useSelector(selectTheme);
return (
<div className={`${styles.container} ${isDarkMode ? styles.dark : ''}`}>
<div id="cv" className={styles.center}>
<h2 className={`${styles.title} ${isDarkMode ? styles.dark : ''}`}>
<b>Curriculum Vitae</b>
</h2>
</div>
<div className={styles.content}>
<div>
<h3 className={`${styles.subtitle} ${isDarkMode ? styles.dark : ''}`}>
Work Background
</h3>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
PT. Kreasi Digital Indo Utama
</h4>
<h5 className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}>
Backend Developer
</h5>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
January 2021 - Now
</p>
</div>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
Bappeda Sumedang
</h4>
<div className={styles.row}>
<div className={styles.col}>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
Web Developer
</h5>
</div>
<div>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
<span style={{ color: '#ff0000' }}>* </span>
Freelancing
</h5>
</div>
</div>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
January 2021
</p>
</div>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
PT. Kreasi Digital Indo Utama
</h4>
<div className={styles.row}>
<div className={styles.col}>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
Fullstack Developer
</h5>
</div>
<div>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
<span style={{ color: '#ff0000' }}>* </span>
Internship
</h5>
</div>
</div>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
October 2020 - December 2020
</p>
</div>
</div>
<div>
<h3 className={`${styles.subtitle} ${isDarkMode ? styles.dark : ''}`}>
Skills
</h3>
<div className={styles.subsection}>
<div className={styles.row}>
<div className={styles.col}>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
Frontend Development
</h5>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
HTML
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
CSS
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
JavaScript
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
Bootstrap
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
React.js
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
Flutter
</p>
</div>
<div className={styles.col}>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
Backend Development
</h5>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
Node.js
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
Express.js
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
PostgreSQL
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
MySQL
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
MongoDB
</p>
<p
className={`${styles.detail} ${
isDarkMode ? styles.dark : ''
}`}
>
Firebase
</p>
</div>
</div>
</div>
</div>
<div>
<h3 className={`${styles.subtitle} ${isDarkMode ? styles.dark : ''}`}>
Tools
</h3>
<div className={styles.subsection}>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
Visual Studio Code
</p>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
Postman
</p>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
Git
</p>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
Docker
</p>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
DBeaver
</p>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
MongoDB Compass
</p>
</div>
</div>
<div>
<h3 className={`${styles.subtitle} ${isDarkMode ? styles.dark : ''}`}>
Education
</h3>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
Universitas Komputer Indonesia
</h4>
<div className={styles.row}>
<div className={styles.col}>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
Informatics Engineering
</h5>
</div>
<div>
<h5
className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}
>
<span style={{ color: '#ff0000' }}>* </span>
3.61 GPA
</h5>
</div>
</div>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
2016 - 2020
</p>
</div>
</div>
<div>
<h3 className={`${styles.subtitle} ${isDarkMode ? styles.dark : ''}`}>
Courses
</h3>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
Geek Portal Training
</h4>
<h5 className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}>
Collaborative Coding Bootcamp
</h5>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
September 2020 - October 2020
</p>
</div>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
UML and Object-Oriented Design Foundations
</h4>
<h5 className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}>
Udemy Online Course
</h5>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
2020
</p>
</div>
<div className={styles.subsection}>
<h4
className={`${styles.subsubtitle} ${
isDarkMode ? styles.dark : ''
}`}
>
Part Time Web Programmer Codepolitan
</h4>
<h5 className={`${styles.info} ${isDarkMode ? styles.dark : ''}`}>
Introduction to Web Development
</h5>
<p className={`${styles.detail} ${isDarkMode ? styles.dark : ''}`}>
August 2018 - November 2018
</p>
</div>
</div>
</div>
<div className={styles['spot-container']}>
<div
className={`
${styles.spot}
${styles['spot-solid']}
${isDarkMode ? styles.dark : ''}
${styles['spot-bottom-right']}
`}
/>
<div
className={`
${styles.spot}
${styles['spot-outline']}
${isDarkMode ? styles.dark : ''}
${styles['spot-bottom-right']}
`}
/>
</div>
</div>
);
}
| CurriculumVitae |
sort.go | package main
import (
"fmt"
"sort"
)
type StuScore struct {
name string // student name
score int // score
}
type StuScores []StuScore
func (s StuScores) Len() int {
return len(s)
}
// v1's Less implemented ascending order; to get a descending result, you only need to modify the Less() function:
func (s StuScores) Less(i, j int) bool {
return s[i].score > s[j].score
}
func (s StuScores) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func main() {
stus := StuScores{
{"alan", 95},
{"hikerell", 91},
{"acmfly", | {alan 95} {hikerell 91} {leao 90}]}
// name: alan, score is 95 | 96},
{"leao", 90},
}
// stus data before sorting
fmt.Println("Default:\n\t", stus)
// call the Sort function to sort the slice
sort.Sort(stus)
// check whether it is already sorted
fmt.Println("IS Sorted?\n\t", sort.IsSorted(stus))
// stus data after sorting
fmt.Println("Sorted:\n\t", stus)
reversed := sort.Reverse(stus)
// Sort must be called again for the collection's order to actually be reversed, because Reverse really only wraps the Less function
sort.Sort(reversed)
// reverse 后的 stus 数据
fmt.Println("sort.Reverse:\n\t", reversed)
x := 95
searched := sort.Search(len(stus), func(i int) bool {
return stus[i].score >= x
})
fmt.Printf("name: %s, score is %d\n\t", stus[searched].name, stus[searched].score)
}
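// Editor's note (illustrative, not part of the original file): since Go 1.8 the
// same descending order can be written inline without defining a named type:
//
//	sort.Slice(stus, func(i, j int) bool { return stus[i].score > stus[j].score })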
// Output:
// Default:
// [{alan 95} {hikerell 91} {acmfly 96} {leao 90}]
// IS Sorted?
// true
// Sorted:
// [{acmfly 96} {alan 95} {hikerell 91} {leao 90}]
// sort.Reverse:
// &{[{acmfly 96} |
describe_bandwidth_package_quota.go | package vpc
import (
github_com_morlay_goqcloud "github.com/morlay/goqcloud"
)
// Query bandwidth package quota
// https://cloud.tencent.com/document/api/215/19210
type DescribeBandwidthPackageQuotaRequest struct {
// Region
Region string `name:"Region"`
}
func (req *DescribeBandwidthPackageQuotaRequest) Invoke(client github_com_morlay_goqcloud.Client) (*DescribeBandwidthPackageQuotaResponse, error) {
resp := &DescribeBandwidthPackageQuotaResponse{}
err := client.Request("vpc", "DescribeBandwidthPackageQuota", "2017-03-12").Do(req, resp)
return resp, err
} | type DescribeBandwidthPackageQuotaResponse struct {
github_com_morlay_goqcloud.TencentCloudBaseResponse
// Bandwidth package quota data structure
QuotaSet []*Quota `json:"QuotaSet"`
} | |
test.ts | // tests go here; this will not be compiled when this package is used as a library
{
const buf = hex`ff0000ff ff00ff00 ffff0000`; // red, green, blue colors
apa102.sendBuffer(buf, DigitalPin.P1, DigitalPin.P2);
pause(1000)
}
{
const buf = hex`ff0000 00ff00 0000ff`; // red, green, blue colors
apa102.sendRGBBuffer(buf, DigitalPin.P1, DigitalPin.P2); | pause(1000)
}
{
const palette = hex`
00000
ff0000
00ff00
0000ff
ffffff
`; // add up to 16 colors
const buf = hex`01234`; // dark, red, green, blue, white
apa102.sendPaletteBuffer(buf, palette, DigitalPin.P1, DigitalPin.P2);
} | |
data.rs | pub(super) static IP_ADDRESSES: &[u64] = &[
28428538856079360, 134744072,
28428538856079360, 167969538,
28428538856079360, 167971081,
28428538856079360, 167969542,
28428538856079360, 168558928,
28428538856079360, 167840257,
28428538856079360, 168100606,
28428538856079360, 168558853,
28428538856079360, 168558854,
28428538856079360, 168558868,
28428538856079360, 168558874,
28428538856079360, 168558878,
28428538856079360, 167969544,
28428538856079360, 167969794,
28428538856079360, 168100862,
28428538856079360, 168558876,
28428538856079360, 168558898,
28428538856079360, 168558934,
28428538856079360, 168558938,
28428538856079360, 168558940,
28428538856079360, 167969541,
28428538856079360, 167969767,
28428538856079360, 167969773,
28428538856079360, 167970049,
28428538856079360, 168558849,
28428538856079360, 168558855,
28428538856079360, 168558872,
28428538856079360, 167839230,
28428538856079360, 167969690,
28428538856079360, 167969765,
28428538856079360, 167969774,
28428538856079360, 168558899,
28428538856079360, 167954942,
28428538856079360, 167969786,
28428538856079360, 168558870,
];
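// Editor's note (illustrative, not part of the original data): the second value
// of each pair decodes as an IPv4 address when read as a big-endian u32, e.g.
// 134744072 == 0x08080808 == 8.8.8.8 and 167969538 == 10.3.3.2, matching hosts
// that appear in TEXT_DATA below. The meaning of the repeated first value is
// not documented here.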
pub(super) static TEXT_DATA: &[&str] = &[
r##"[2017-04-18 22:15:45.204116 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.11 {"cpmCPUTotal1minRev": U32(1), "cpmCPUTotal5secRev": U32(1), "cpmCPUTotal5minRev": U32(1)}"##,
r##"[2017-04-18 22:15:46.199988 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.6 {"cpmCPUTotal5secRev": U32(6), "cpmCPUTotal1minRev": U32(6), "cpmCPUTotal5minRev": U32(5)}"##,
r##"[2017-04-18 22:15:46.232624 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.1.6 {"cpmCPUTotal1minRev": U32(6), "cpmCPUTotal5minRev": U32(6), "cpmCPUTotal5secRev": U32(7)}"##,
r##"[2017-04-18 22:15:46.608553 +02:00] INFO [src/poller.rs:147] Polling result for host 10.3.3.2 {"cpmCPUTotal5secRev": U32(4), "cpmCPUTotal5minRev": U32(4), "cpmCPUTotal1minRev": U32(4)}"##,
r##"[2017-04-18 22:15:47.240989 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.1.5 {"cpmCPUTotal5minRev": U32(28), "cpmCPUTotal5secRev": U32(27), "cpmCPUTotal1minRev": U32(29)}"##,
r##"[2017-04-18 22:15:48.201469 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.2 {"cpmCPUTotal1minRev": U32(2), "cpmCPUTotal5minRev": U32(2), "cpmCPUTotal5secRev": U32(1)}"##,
r##"[2017-04-18 22:15:49.201188 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.1 {"cpmCPUTotal1minRev": U32(4), "cpmCPUTotal5secRev": U32(5), "cpmCPUTotal5minRev": U32(3)}"##,
r##"[2017-04-18 22:15:49.204710 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.3 {"cpmCPUTotal5secRev": U32(1), "cpmCPUTotal5minRev": U32(1), "cpmCPUTotal1minRev": U32(1)}"##,
r##"[2017-04-18 22:15:49.205035 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.10 {"cpmCPUTotal5minRev": U32(8), "cpmCPUTotal5secRev": U32(8), "cpmCPUTotal1minRev": U32(8)}"##,
r##"[2017-04-18 22:15:49.205433 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.13 {"cpmCPUTotal1minRev": U32(1), "cpmCPUTotal5minRev": U32(1), "cpmCPUTotal5secRev": U32(1)}"##,
r##"[2017-04-18 22:15:49.210189 +02:00] INFO [src/poller.rs:147] Polling result for host 172.19.19.1 {"cpmCPUTotal1minRev": U32(7), "cpmCPUTotal5minRev": U32(5), "cpmCPUTotal5secRev": U32(13)}"##,
r##"[2017-04-18 22:15:49.600605 +02:00] INFO [src/poller.rs:147] Polling result for host 10.3.3.1 {"cpmCPUTotal5minRev": U32(0), "cpmCPUTotal1minRev": U32(0), "cpmCPUTotal5secRev": U32(0)}"##,
r##"[2017-04-18 22:15:50.204099 +02:00] INFO [src/poller.rs:147] Polling result for host 10.80.80.2 {"cpmCPUTotal5secRev": U32(0), "cpmCPUTotal5minRev": U32(1), "cpmCPUTotal1minRev": U32(1)}"##,
r##"[2017-04-18 22:15:50.205165 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.11 {"cpmCPUTotal1minRev": U32(1), "cpmCPUTotal5secRev": U32(1), "cpmCPUTotal5minRev": U32(1)}"##,
r##"[2017-04-18 22:15:51.200809 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.6 {"cpmCPUTotal5secRev": U32(6), "cpmCPUTotal1minRev": U32(6), "cpmCPUTotal5minRev": U32(5)}"##,
r##"[2017-04-18 22:15:51.221169 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.1.6 {"cpmCPUTotal1minRev": U32(6), "cpmCPUTotal5secRev": U32(6), "cpmCPUTotal5minRev": U32(6)}"##,
r##"[2017-04-18 22:15:51.611120 +02:00] INFO [src/poller.rs:147] Polling result for host 10.3.3.2 {"cpmCPUTotal5secRev": U32(3), "cpmCPUTotal5minRev": U32(4), "cpmCPUTotal1minRev": U32(4)}"##,
r##"[2017-04-18 22:15:52.230092 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.1.5 {"cpmCPUTotal1minRev": U32(28), "cpmCPUTotal5minRev": U32(28), "cpmCPUTotal5secRev": U32(28)}"##,
r##"[2017-04-18 22:15:52.590393 +02:00] INFO [src/poller.rs:147] Polling result for host 10.3.5.1 {"cpmCPUTotal5minRev": U32(14), "cpmCPUTotal1minRev": U32(17), "cpmCPUTotal5secRev": U32(20)}"##,
r##"[2017-04-18 22:15:52.605963 +02:00] INFO [src/poller.rs:147] Polling result for host 10.3.3.5 {"cpmCPUTotal1minRev": U32(5), "cpmCPUTotal5secRev": U32(4), "cpmCPUTotal5minRev": U32(5)}"##,
r##"[2017-04-18 22:15:53.201867 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.2 {"cpmCPUTotal1minRev": U32(2), "cpmCPUTotal5secRev": U32(2), "cpmCPUTotal5minRev": U32(2)}"##,
r##"[2017-04-18 22:15:54.201623 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.1 {"cpmCPUTotal5secRev": U32(5), "cpmCPUTotal1minRev": U32(4), "cpmCPUTotal5minRev": U32(3)}"##,
r##"[2017-04-18 22:15:54.207444 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.3 {"cpmCPUTotal5secRev": U32(2), "cpmCPUTotal1minRev": U32(1), "cpmCPUTotal5minRev": U32(1)}"##,
r##"[2017-04-18 22:15:54.208925 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.10 {"cpmCPUTotal5secRev": U32(8), "cpmCPUTotal5minRev": U32(8), "cpmCPUTotal1minRev": U32(8)}"##,
r##"[2017-04-18 22:15:54.210605 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.13 {"cpmCPUTotal5minRev": U32(1), "cpmCPUTotal1minRev": U32(1), "cpmCPUTotal5secRev": U32(1)}"##,
r##"[2017-04-18 22:15:54.216638 +02:00] INFO [src/poller.rs:147] Polling result for host 172.19.19.1 {"cpmCPUTotal5minRev": U32(5), "cpmCPUTotal1minRev": U32(8), "cpmCPUTotal5secRev": U32(14)}"##, | ]; | r##"[2017-04-18 22:15:54.602079 +02:00] INFO [src/poller.rs:147] Polling result for host 10.3.3.1 {"cpmCPUTotal5secRev": U32(0), "cpmCPUTotal1minRev": U32(0), "cpmCPUTotal5minRev": U32(0)}"##,
r##"[2017-04-18 22:15:55.204235 +02:00] INFO [src/poller.rs:147] Polling result for host 10.80.80.2 {"cpmCPUTotal5secRev": U32(0), "cpmCPUTotal5minRev": U32(1), "cpmCPUTotal1minRev": U32(1)}"##,
r##"[2017-04-18 22:15:55.206436 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.11 {"cpmCPUTotal5minRev": U32(1), "cpmCPUTotal1minRev": U32(1), "cpmCPUTotal5secRev": U32(1)}"##,
r##"[2017-04-18 22:15:56.200047 +02:00] INFO [src/poller.rs:147] Polling result for host 10.12.2.6 {"cpmCPUTotal5secRev": U32(6), "cpmCPUTotal5minRev": U32(6), "cpmCPUTotal1minRev": U32(6)}"##, |
ingress.go | /*
Copyright The Kubernetes Authors.
Copyright 2020 Authors of Arktos - file modified.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1beta1
import (
v1beta1 "k8s.io/api/networking/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// IngressLister helps list Ingresses.
type IngressLister interface {
// List lists all Ingresses in the indexer.
List(selector labels.Selector) (ret []*v1beta1.Ingress, err error)
// Ingresses returns an object that can list and get Ingresses.
Ingresses(namespace string) IngressNamespaceLister
IngressesWithMultiTenancy(namespace string, tenant string) IngressNamespaceLister
IngressListerExpansion
}
// ingressLister implements the IngressLister interface.
type ingressLister struct {
indexer cache.Indexer
}
// NewIngressLister returns a new IngressLister.
func | (indexer cache.Indexer) IngressLister {
return &ingressLister{indexer: indexer}
}
// List lists all Ingresses in the indexer.
func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.Ingress))
})
return ret, err
}
// Ingresses returns an object that can list and get Ingresses.
func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister {
return ingressNamespaceLister{indexer: s.indexer, namespace: namespace, tenant: "system"}
}
func (s *ingressLister) IngressesWithMultiTenancy(namespace string, tenant string) IngressNamespaceLister {
return ingressNamespaceLister{indexer: s.indexer, namespace: namespace, tenant: tenant}
}
// IngressNamespaceLister helps list and get Ingresses.
type IngressNamespaceLister interface {
// List lists all Ingresses in the indexer for a given tenant/namespace.
List(selector labels.Selector) (ret []*v1beta1.Ingress, err error)
// Get retrieves the Ingress from the indexer for a given tenant/namespace and name.
Get(name string) (*v1beta1.Ingress, error)
IngressNamespaceListerExpansion
}
// ingressNamespaceLister implements the IngressNamespaceLister
// interface.
type ingressNamespaceLister struct {
indexer cache.Indexer
namespace string
tenant string
}
// List lists all Ingresses in the indexer for a given namespace.
func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) {
err = cache.ListAllByNamespace(s.indexer, s.tenant, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1beta1.Ingress))
})
return ret, err
}
// Get retrieves the Ingress from the indexer for a given namespace and name.
func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) {
key := s.tenant + "/" + s.namespace + "/" + name
if s.tenant == "system" {
key = s.namespace + "/" + name
}
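// Editor's note (illustrative): tenant "t1", namespace "ns1", name "web"
// yields key "t1/ns1/web", while the special "system" tenant collapses the
// key to "ns1/web".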
obj, exists, err := s.indexer.GetByKey(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name)
}
return obj.(*v1beta1.Ingress), nil
}
| NewIngressLister |
fa.js | (function(e){const i=e["fa"]=e["fa"]||{};i.dictionary=Object.assign(i.dictionary||{},{"%0 of %1":"0% از 1%","Block quote":" بلوک نقل قول",Bold:"درشت","Bulleted List":"لیست نشانهدار",Cancel:"لغو","Cannot upload file:":"فایل آپلود نمیشود:","Centered image":"تصویر در وسط","Change image text alternative":"تغییر متن جایگزین تصویر","Choose heading":"انتخاب عنوان",Column:"ستون","Could not insert image at the current position.":"نمیتوان تصویر را در موقعیت فعلی وارد کرد","Could not obtain resized image URL.":"نمیتوان آدرس اینترنتی تصویر تغییر اندازه یافته را بدست آورد","Decrease indent":"کاهش تورفتگی","Delete column":"حذف ستون","Delete row":"حذف سطر",Downloadable:"قابل بارگیری","Dropdown toolbar":"نوارابزار کشویی","Edit link":"ویرایش پیوند","Editor toolbar":"نوارابزار ویرایشگر","Enter image caption":"عنوان تصویر را وارد کنید","Full size image":"تصویر در اندازه کامل","Header column":"ستون سربرگ","Header row":"سطر سربرگ",Heading:"عنوان","Heading 1":"عنوان 1","Heading 2":"عنوان 2","Heading 3":"عنوان 3","Heading 4":"عنوان 4","Heading 5":"عنوان 5","Heading 6":"عنوان 6","Image resize list":"","Image toolbar":"نوارابزار تصویر","image widget":"ابزاره تصویر","Increase indent":"افزایش تورفتگی","Insert column left":"درج ستون در سمت چپ","Insert column right":"درج ستون در سمت راست","Insert image":"قرار دادن تصویر","Insert image or file":"وارد کردن تصویر یا فایل","Insert media":"وارد کردن رسانه","Insert paragraph after block":"درج پاراگراف بعد از بلوک","Insert paragraph before block":"درج پاراگراف قبل از بلوک","Insert row above":"درج سطر در بالا","Insert row below":"درج سطر در پایین","Insert table":"درج جدول","Inserting image failed":"وارد کردن تصویر انجام نشد",Italic:"کج","Left aligned image":"تصویر تراز شده چپ",Link:"پیوند","Link URL":"نشانی اینترنتی پیوند","Media URL":"آدرس اینترنتی رسانه","media widget":"ویجت رسانه","Merge cell down":"ادغام سلول پایین","Merge cell left":"ادغام سلول چپ","Merge cell right":"ادغام سلول راست","Merge cell up":"ادغام سلول بالا","Merge cells":"ادغام سلول ها",Next:"بعدی","Numbered List":"لیست عددی","Open in a new tab":"بازکردن در برگه جدید","Open link in new tab":"باز کردن پیوند در برگه جدید",Original:"",Paragraph:"پاراگراف","Paste the media URL in the input.":"آدرس رسانه را در ورودی قرار دهید",Previous:"قبلی",Redo:"باز انجام","Resize image":"","Resize image to %0":"","Resize image to the original size":"","Rich Text Editor":"ویرایشگر متن غنی","Rich Text Editor, %0":"ویرایشگر متن غنی، %0","Right aligned image":"تصویر تراز شده راست",Row:"سطر",Save:"ذخیره","Select all":"انتخاب همه","Select column":"","Select row":"","Selecting resized image failed":"انتخاب تصویر تغییر اندازه یافته انجام نشد","Show more items":"","Side image":"تصویر جانبی","Split cell horizontally":"تقسیم افقی سلول","Split cell vertically":"تقسیم عمودی سلول","Table toolbar":"نوارابزار جدول","Text alternative":"متن جایگزین","The URL must not be empty.":"آدرس اینترنتی URL نباید خالی باشد.","This link has no URL":"این پیوند نشانی اینترنتی ندارد","This media URL is not supported.":"این آدرس اینترنتی رسانه پشتیبانی نمیشود","Tip: Paste the URL into the content to embed faster.":"نکته : آدرس را در محتوا قراردهید تا سریع تر جاسازی شود",Undo:"بازگردانی",Unlink:"لغو پیوند","Upload failed":"آپلود ناموفق بود","Upload in progress":"آپلود در حال انجام","Widget toolbar":"نوار ابزار ویجت"});i.getPluralForm=function(e){return e>1}})(window.CKEDITOR_TRANSLATIONS||(window.CKEDITOR_TRANSLATIONS={})); |
||
wasserstein.rs | use ndarray::Array2;
use crate::graph::{Edge, Graph, Vertex};
use std::collections::HashMap;
pub fn wasserstein_1d(left: Vec<u64>, right: Vec<u64>) -> Result<usize, String> {
let total_supply: u64 = left.iter().sum();
let total_demand: u64 = right.iter().sum();
if left.len() != right.len() {
Err("Wasserstein is not implemented between histograms with different shapes".to_string())
} else if total_supply > std::i32::MAX as u64 {
Err("total supply must fit in i32".to_string())
} else if total_supply != total_demand {
Err("Wasserstein is not implemented for cases where supply != demand.".to_string())
} else {
let left_vertices: Vec<Vertex> = left
.iter()
.enumerate()
.map(|(i, &v)| Vertex {
index: i, | .collect();
let mut right_vertices: Vec<Vertex> = right
.iter()
.enumerate()
.map(|(i, &v)| Vertex {
index: i + left.len(),
coordinates: (i, 0),
supply: -(v as i64),
})
.collect();
let mut edges: Vec<Edge> = Vec::new();
for &l in left_vertices.iter() {
for &r in right_vertices.iter() {
edges.push(Edge::new(l, r));
}
}
let mut vertices = left_vertices;
vertices.append(&mut right_vertices);
let mut graph = Graph {
vertices,
edges,
max_capacity: total_supply as usize,
};
let total_cost = graph.mcmf()?;
Ok(total_cost)
}
}
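// Illustrative sketch (editor's addition, assuming Edge::new derives its cost
// from the distance between vertex coordinates): moving one unit of mass from
// bin 0 to bin 2 should cost 2, i.e. one would expect
//
//     assert_eq!(wasserstein_1d(vec![1, 0, 0], vec![0, 0, 1]), Ok(2));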
pub fn wasserstein_2d(left: Array2<u64>, right: Array2<u64>) -> Result<usize, String> {
let total_supply: u64 = left.sum();
let total_demand: u64 = right.sum();
if left.shape() != right.shape() {
Err("Wasserstein is not implemented between histograms with different shapes".to_string())
} else if total_supply > std::i32::MAX as u64 {
Err("total supply must fit in i32".to_string())
} else if total_supply != total_demand {
Err("Wasserstein is not implemented for cases where supply != demand.".to_string())
} else {
let (n, m) = (left.shape()[0], left.shape()[1]);
let num_bins = n * m;
let mut vertices = vec![];
let mut sources: HashMap<(usize, usize), Vertex> = HashMap::new();
let mut middles: HashMap<(usize, usize), Vertex> = HashMap::new();
let mut sinks: HashMap<(usize, usize), Vertex> = HashMap::new();
for l in 0..3 {
for i in 0..n {
for j in 0..m {
let index = l * num_bins + m * i + j;
let supply = if l == 0 {
left[[i, j]] as i64
} else if l == 2 {
-(right[[i, j]] as i64)
} else {
0
};
let vertex = Vertex {
index,
supply,
coordinates: (i, j),
};
if l == 0 {
sources.insert((i, j), vertex);
} else if l == 1 {
middles.insert((i, j), vertex);
} else {
sinks.insert((i, j), vertex);
}
vertices.push(vertex);
}
}
}
let mut edges = vec![];
for i in 0..n {
for j in 0..m {
let source_coordinates = (i, j);
let middle_coordinates: Vec<(usize, usize)> = (0..m).map(|k| (i, k)).collect();
for &middle in middle_coordinates.iter() {
edges.push(Edge::new(
*sources.get(&source_coordinates).unwrap(),
*middles.get(&middle).unwrap(),
))
}
let sink_coordinates: Vec<(usize, usize)> = (0..n).map(|k| (k, j)).collect();
for &sink in sink_coordinates.iter() {
edges.push(Edge::new(
*middles.get(&source_coordinates).unwrap(),
*sinks.get(&sink).unwrap(),
))
}
}
}
let mut graph = Graph {
vertices,
edges,
max_capacity: total_supply as usize,
};
let total_cost = graph.mcmf()?;
Ok(total_cost)
}
} | coordinates: (i, 0),
supply: v as i64,
}) |
menutab.js | /**
* @fileOverview
*
* Multi-level tab creation implemented with FUI.Tabs
*
* @author: techird
* @copyright: Baidu FEX, 2014
*/
KM.registerUI('widget/menutab', function(minder) {
function generate(parent, n | efault) {
var index = parent.getButtons().length;
var tab = parent.appendTab({
buttons: [{
label: minder.getLang('ui.menu.' + name + 'tab'),
className: 'tab-' + name
}]
});
if (asDefault) {
parent.select(index);
}
return tab[0].panel.getContentElement();
}
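// Illustrative sketch (editor's addition; the tab name is hypothetical):
// a caller would use the returned API roughly like this:
//
//   var $content = menutab.generate(tabsPane, 'idea', true);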
return {
generate: generate
};
});
| ame, asD |
1f745726.ab217132.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[47],{119:function(e,n,t){"use strict";t.r(n),t.d(n,"frontMatter",(function(){return s})),t.d(n,"metadata",(function(){return i})),t.d(n,"toc",(function(){return c})),t.d(n,"default",(function(){return p}));var r=t(3),a=t(8),o=(t(0),t(453)),s={id:"username-email-password",title:"Username / Email & Password"},i={unversionedId:"concepts/credentials/username-email-password",id:"version-v0.2/concepts/credentials/username-email-password",isDocsHomePage:!1,title:"Username / Email & Password",description:"The password method is the most commonly used form of authentication, it",source:"@site/versioned_docs/version-v0.2/concepts/credentials/username-email-password.md",sourceDirName:"concepts/credentials",slug:"/concepts/credentials/username-email-password",permalink:"/kratos/docs/v0.2/concepts/credentials/username-email-password",editUrl:"https://github.com/ory/kratos/edit/master/docs/versioned_docs/version-v0.2/concepts/credentials/username-email-password.md",version:"v0.2",lastUpdatedBy:"aeneasr",lastUpdatedAt:1600065426,formattedLastUpdatedAt:"9/14/2020",frontMatter:{id:"username-email-password",title:"Username / Email & Password"},sidebar:"version-v0.2/docs",previous:{title:"Overview",permalink:"/kratos/docs/v0.2/concepts/credentials"},next:{title:"Social Sign In, OpenID Connect, and OAuth2",permalink:"/kratos/docs/v0.2/concepts/credentials/openid-connect-oidc-oauth2"}},c=[{value:"Configuration",id:"configuration",children:[]},{value:"JSON Schema",id:"json-schema",children:[]},{value:"Example",id:"example",children:[]}],l={toc:c};function p(e){var n=e.components,t=Object(a.a)(e,["components"]);return Object(o.b)("wrapper",Object(r.a)({},l,t,{components:n,mdxType:"MDXLayout"}),Object(o.b)("p",null,"The ",Object(o.b)("inlineCode",{parentName:"p"},"password")," method is the most commonly used form of authentication, it\nrequires an ",Object(o.b)("inlineCode",{parentName:"p"},"identifier")," (username, email, phone number, ...) and a ",Object(o.b)("inlineCode",{parentName:"p"},"password"),"\nduring registration and login."),Object(o.b)("p",null,"ORY Kratos hashes the password after registration, password reset, and password\nchange using ",Object(o.b)("a",{parentName:"p",href:"https://github.com/P-H-C/phc-winner-argon2"},"Argon2"),", the winner of\nthe Password Hashing Competition (PHC)."),Object(o.b)("h2",{id:"configuration"},"Configuration"),Object(o.b)("p",null,"Enabling this method is as easy as setting"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-yaml"},"selfservice:\n strategies:\n password:\n enabled: true\n")),Object(o.b)("p",null,"in your ORY Kratos configuration. 
You can configure the Argon2 hasher using the\nfollowing options:"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-yaml"},"hashers:\n argon2:\n parallelism: 1\n memory: 131072 # 128MB\n iterations: 3\n salt_length: 16\n key_length: 32\n")),Object(o.b)("p",null,"For a complete reference, defaults, and description please check the\n",Object(o.b)("a",{parentName:"p",href:"/kratos/docs/v0.2/reference/configuration"},"Configuration Reference"),"."),Object(o.b)("p",null,"For a better understanding of security implications imposed by Argon2\nConfiguration, head over to ",Object(o.b)("a",{parentName:"p",href:"/kratos/docs/v0.2/concepts/security#argon2"},"Argon2 Security"),"."),Object(o.b)("h2",{id:"json-schema"},"JSON Schema"),Object(o.b)("p",null,"When processing an identity and its traits, the method will use the JSON Schema\nto extract one or more identifiers. Assuming you want your identities to sign up\nwith an email address, and use that email address as a valid identifier during\nlogin, you can use a schema along the lines of:"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-json"},'{\n "$id": "https://example.com/example.json",\n "$schema": "http://json-schema.org/draft-07/schema#",\n "title": "Person",\n "type": "object",\n "properties": {\n "email": {\n "type": "string",\n "format": "email",\n "title": "E-Mail",\n "ory.sh/kratos": {\n "credentials": {\n "password": {\n "identifier": true\n }\n }\n }\n }\n }\n}\n')),Object(o.b)("p",null,"If you want a unique username instead, you could write the schema as follows:"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-json"},'{\n "$id": "https://example.com/example.json",\n "$schema": "http://json-schema.org/draft-07/schema#",\n "title": "Person",\n "type": "object",\n "properties": {\n "username": {\n "type": "string",\n "title": "Username",\n "ory.sh/kratos": {\n "credentials": {\n "password": {\n "identifier": true\n }\n }\n }\n }\n }\n}\n')),Object(o.b)("p",null,'You are not limited to one identifier per identity. 
You could also combine both\nfields and support a use case of "username" and "email" as an identifier for\nlogin:'),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-json"},'{\n "$id": "https://example.com/example.json",\n "$schema": "http://json-schema.org/draft-07/schema#",\n "title": "Person",\n "type": "object",\n "properties": {\n "email": {\n "type": "string",\n "format": "email",\n "title": "E-Mail",\n "ory.sh/kratos": {\n "credentials": {\n "password": {\n "identifier": true\n }\n }\n }\n },\n "username": {\n "type": "string",\n "title": "Username",\n "ory.sh/kratos": {\n "credentials": {\n "password": {\n "identifier": true\n }\n }\n }\n }\n }\n}\n')),Object(o.b)("h2",{id:"example"},"Example"),Object(o.b)("p",null,"Assuming your traits schema is as follows:"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-json"},'{\n "$id": "https://example.com/example.json",\n "$schema": "http://json-schema.org/draft-07/schema#",\n "title": "Person",\n "type": "object",\n "properties": {\n "first_name": {\n "type": "string"\n },\n "email": {\n "type": "string",\n "format": "email",\n "ory.sh/kratos": {\n "credentials": {\n "password": {\n "identifier": true\n }\n }\n }\n },\n "username": {\n "type": "string",\n "ory.sh/kratos": {\n "credentials": {\n "password": {\n "identifier": true\n }\n }\n }\n }\n },\n "additionalProperties": false\n}\n')),Object(o.b)("p",null,"And an identity registers with the following JSON payload (more on registration\nin\n",Object(o.b)("a",{parentName:"p",href:"/kratos/docs/v0.2/self-service/flows/user-login-user-registration"},"Selfservice Registration"),"):"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-json"},'{\n "traits": {\n "first_name": "John Doe",\n "email": "[email protected]",\n "username": "johndoe123"\n },\n "password": "my-secret-password"\n}\n')),Object(o.b)("p",null,"The ",Object(o.b)("inlineCode",{parentName:"p"},"password")," method would generate a credentials block as follows:"),Object(o.b)("pre",null,Object(o.b)("code",{parentName:"pre",className:"language-yaml"},"credentials:\n password:\n id: password\n identifiers:\n - [email protected]\n - johndoe123\n config:\n hashed_password: ... 
# this would be `argon2(my-secret-password)`\n")),Object(o.b)("p",null,"Because credential identifiers need to be unique, no other identity can be\ncreated that has ",Object(o.b)("inlineCode",{parentName:"p"},"johndoe123")," or ",Object(o.b)("inlineCode",{parentName:"p"},"[email protected]")," as their ",Object(o.b)("inlineCode",{parentName:"p"},"email")," or\n",Object(o.b)("inlineCode",{parentName:"p"},"username"),"."))}p.isMDXComponent=!0},453:function(e,n,t){"use strict";t.d(n,"a",(function(){return d})),t.d(n,"b",(function(){return b}));var r=t(0),a=t.n(r);function o(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function s(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function i(e){for(var n=1;n<arguments.length;n++){var t=null!=arguments[n]?arguments[n]:{};n%2?s(Object(t),!0).forEach((function(n){o(e,n,t[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(t)):s(Object(t)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(t,n))}))}return e}function c(e,n){if(null==e)return{};var t,r,a=function(e,n){if(null==e)return{};var t,r,a={},o=Object.keys(e);for(r=0;r<o.length;r++)t=o[r],n.indexOf(t)>=0||(a[t]=e[t]);return a}(e,n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r<o.length;r++)t=o[r],n.indexOf(t)>=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(a[t]=e[t])}return a}var l=a.a.createContext({}),p=function(e){var n=a.a.useContext(l),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},d=function(e){var n=p(e.components);return a.a.createElement(l.Provider,{value:n},e.children)},m={inlineCode:"code",wrapper:function(e){var n=e.children;return a.a.createElement(a.a.Fragment,{},n)}},u=a.a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,o=e.originalType,s=e.parentName,l=c(e,["components","mdxType","originalType","parentName"]),d=p(t),u=r,b=d["".concat(s,".").concat(u)]||d[u]||m[u]||o;return t?a.a.createElement(b,i(i({ref:n},l),{},{components:t})):a.a.createElement(b,i({ref:n},l))}));function | (e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var o=t.length,s=new Array(o);s[0]=u;var i={};for(var c in n)hasOwnProperty.call(n,c)&&(i[c]=n[c]);i.originalType=e,i.mdxType="string"==typeof e?e:r,s[1]=i;for(var l=2;l<o;l++)s[l]=t[l];return a.a.createElement.apply(null,s)}return a.a.createElement.apply(null,t)}u.displayName="MDXCreateElement"}}]); | b |
FlutterElement.py | from Element.FlutterFind import FlutterFind
from selenium.common.exceptions import WebDriverException, NoSuchElementException
from Utilitys.WaitUtils import WaitUtils
class FlutterElement(FlutterFind):
def __init__(self, driver):
FlutterFind.__init__(self)
self.driver = driver
self.interval = 0.5
self.timeout = 20
def find_flutter_element_and_click(self, value):
try:
self.driver.find_flutter_element(value).click()
except Exception as e:
raise NoSuchElementException
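# Editor's note (illustrative): the bare `raise NoSuchElementException` above
# discards the original error; chaining it would preserve the context, e.g.:
#
#   raise NoSuchElementException(str(e)) from e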
def flutter_scroll_to_text(self, value):
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.execute_script(
"flutter:scrollIntoView", value, 0.1)
except Exception as handleRetry:
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.execute_script(
"flutter:scrollIntoView", value, 0.1)
except Exception as e:
raise NoSuchElementException | WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.elementSendKeys(locator, value)
except Exception as handleRetry:
try:
WaitUtils.flutter_wait_for_element(self.driver, value)
self.driver.elementSendKeys(locator, value)
except Exception as e:
raise NoSuchElementException |
def find_flutter_element_sendkeys(self, locator, value):
try: |
memcache_test.go | // +build !integration
package memcache
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/libbeat/common"
)
type memcacheTest struct {
mc *Memcache
transactions []*transaction
}
func newMemcacheTest(config memcacheConfig) *memcacheTest {
mct := &memcacheTest{}
mc := &Memcache{}
mc.init(nil, &config)
mc.handler = mct
mct.mc = mc
return mct
}
func (mct *memcacheTest) onTransaction(t *transaction) {
mct.transactions = append(mct.transactions, t)
}
func (mct *memcacheTest) genTransaction(requ, resp *message) *transaction {
if requ != nil {
requ.CmdlineTuple = &common.CmdlineTuple{}
}
if resp != nil {
resp.CmdlineTuple = &common.CmdlineTuple{}
}
t := newTransaction(requ, resp)
mct.mc.finishTransaction(t)
return t
}
func makeBinMessage(
t *testing.T,
hdr *binHeader,
extras []extraFn,
key valueFn,
value valueFn,
) *message {
buf, err := prepareBinMessage(hdr, extras, key, value)
if err != nil {
t.Fatalf("generating bin message failed with: %v", err)
}
return binParseNoFail(t, buf.Bytes())
}
func makeTransactionEvent(t *testing.T, trans *transaction) common.MapStr {
event := common.MapStr{}
err := trans.Event(event)
if err != nil {
t.Fatalf("serializing transaction failed with: %v", err)
}
return event
}
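// Editor's note (illustrative sketch, not part of the original suite): the
// helpers above compose into a minimal end-to-end check along these lines:
//
//	mct := newMemcacheTest(defaultConfig)
//	trans := mct.genTransaction(
//		textParseNoFail(t, "get k\r\n"),
//		textParseNoFail(t, "VALUE k 0 5\r\nvalue\r\nEND\r\n"))
//	event := makeTransactionEvent(t, trans)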
func Test_TryMergeUnmergeableResponses(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
msg1 := textParseNoFail(t, "STORED\r\n")
msg2 := textParseNoFail(t, "0\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.False(t, b)
assert.Nil(t, err)
}
func Test_TryMergeUnmergeableResponseWithValue(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
msg1 := textParseNoFail(t, "VALUE k 1 5 3\r\nvalue\r\n")
msg2 := textParseNoFail(t, "0\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.False(t, b)
assert.Nil(t, err)
}
func | (t *testing.T) {
mct := newMemcacheTest(defaultConfig)
msg1 := textParseNoFail(t, "STAT name value\r\n")
msg2 := textParseNoFail(t, "0\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.False(t, b)
assert.Nil(t, err)
}
func Test_MergeTextValueResponses(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
msg1 := textParseNoFail(t, "VALUE k 1 6 3\r\nvalue1\r\n")
msg2 := textParseNoFail(t, "VALUE k 1 6 3\r\nvalue2\r\n")
msg3 := textParseNoFail(t, "END\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.True(t, b)
assert.Nil(t, err)
assert.False(t, msg1.isComplete)
b, err = tryMergeResponses(mct.mc, msg1, msg3)
assert.True(t, b)
assert.Nil(t, err)
assert.True(t, msg1.isComplete)
}
func Test_MergeTextStatsValueResponses(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
msg1 := textParseNoFail(t, "STAT name1 value1\r\n")
msg2 := textParseNoFail(t, "STAT name2 value2\r\n")
msg3 := textParseNoFail(t, "END\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.True(t, b)
assert.Nil(t, err)
assert.False(t, msg1.isComplete)
b, err = tryMergeResponses(mct.mc, msg1, msg3)
assert.True(t, b)
assert.Nil(t, err)
assert.True(t, msg1.isComplete)
}
func Test_MergeBinaryStatsValueResponses(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
msg1 := makeBinMessage(t,
&binHeader{opcode: opcodeStat, request: false},
extras(), key("stat1"), value("value1"))
msg2 := makeBinMessage(t,
&binHeader{opcode: opcodeStat, request: false},
extras(), key("stat2"), value("value2"))
msg3 := makeBinMessage(t,
&binHeader{opcode: opcodeStat, request: false},
extras(), noKey, noValue)
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.True(t, b)
assert.Nil(t, err)
assert.False(t, msg1.isComplete)
b, err = tryMergeResponses(mct.mc, msg1, msg3)
assert.True(t, b)
assert.Nil(t, err)
assert.True(t, msg1.isComplete)
}
func Test_MergeTextValueResponsesNoLimits(t *testing.T) {
config := defaultConfig
config.MaxValues = -1
config.MaxBytesPerValue = 1000
mct := newMemcacheTest(config)
msg1 := textParseNoFail(t, "VALUE k1 1 6 3\r\nvalue1\r\n")
msg2 := textParseNoFail(t, "VALUE k2 1 6 3\r\nvalue2\r\n")
msg3 := textParseNoFail(t, "END\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.True(t, b)
assert.Nil(t, err)
assert.False(t, msg1.isComplete)
b, err = tryMergeResponses(mct.mc, msg1, msg3)
assert.True(t, b)
assert.Nil(t, err)
assert.True(t, msg1.isComplete)
msg := msg1
assert.Equal(t, "k1", msg.keys[0].String())
assert.Equal(t, "k2", msg.keys[1].String())
assert.Equal(t, uint32(2), msg.count_values)
assert.Equal(t, "value1", msg.values[0].String())
assert.Equal(t, "value2", msg.values[1].String())
}
func Test_MergeTextValueResponsesWithLimits(t *testing.T) {
config := defaultConfig
config.MaxValues = 1
config.MaxBytesPerValue = 1000
mct := newMemcacheTest(config)
msg1 := textParseNoFail(t, "VALUE k1 1 6 3\r\nvalue1\r\n")
msg2 := textParseNoFail(t, "VALUE k2 1 6 3\r\nvalue2\r\n")
msg3 := textParseNoFail(t, "END\r\n")
b, err := tryMergeResponses(mct.mc, msg1, msg2)
assert.True(t, b)
assert.Nil(t, err)
assert.False(t, msg1.isComplete)
b, err = tryMergeResponses(mct.mc, msg1, msg3)
assert.True(t, b)
assert.Nil(t, err)
assert.True(t, msg1.isComplete)
msg := msg1
assert.Equal(t, "k1", msg.keys[0].String())
assert.Equal(t, "k2", msg.keys[1].String())
assert.Equal(t, uint32(2), msg.count_values)
assert.Equal(t, 1, len(msg.values))
assert.Equal(t, "value1", msg.values[0].String())
}
func Test_TransactionComplete(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
trans := mct.genTransaction(
textParseNoFail(t, "set k 0 0 5\r\nvalue\r\n"),
textParseNoFail(t, "STORED\r\n"),
)
assert.Equal(t, common.OK_STATUS, trans.Status)
assert.Equal(t, uint64(20), trans.BytesOut)
assert.Equal(t, uint64(8), trans.BytesIn)
assert.Equal(t, trans, mct.transactions[0])
event := makeTransactionEvent(t, trans)
assert.Equal(t, "memcache", event["type"])
assert.Equal(t, common.OK_STATUS, event["status"])
}
func Test_TransactionRequestNoReply(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
trans := mct.genTransaction(
textParseNoFail(t, "set k 0 0 5 noreply\r\nvalue\r\n"),
nil,
)
assert.Equal(t, common.OK_STATUS, trans.Status)
assert.Equal(t, uint64(28), trans.BytesOut)
assert.Equal(t, uint64(0), trans.BytesIn)
assert.Equal(t, trans, mct.transactions[0])
event := makeTransactionEvent(t, trans)
assert.Equal(t, "memcache", event["type"])
assert.Equal(t, common.OK_STATUS, event["status"])
}
func Test_TransactionLostResponse(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
trans := mct.genTransaction(
textParseNoFail(t, "set k 0 0 5\r\nvalue\r\n"),
nil,
)
assert.Equal(t, common.SERVER_ERROR_STATUS, trans.Status)
assert.Equal(t, uint64(20), trans.BytesOut)
assert.Equal(t, uint64(0), trans.BytesIn)
assert.Equal(t, trans, mct.transactions[0])
event := makeTransactionEvent(t, trans)
assert.Equal(t, "memcache", event["type"])
assert.Equal(t, common.SERVER_ERROR_STATUS, event["status"])
}
func Test_TransactionLostRequest(t *testing.T) {
mct := newMemcacheTest(defaultConfig)
trans := mct.genTransaction(
nil,
textParseNoFail(t, "STORED\r\n"),
)
assert.Equal(t, common.CLIENT_ERROR_STATUS, trans.Status)
assert.Equal(t, uint64(0), trans.BytesOut)
assert.Equal(t, uint64(8), trans.BytesIn)
assert.Equal(t, trans, mct.transactions[0])
event := makeTransactionEvent(t, trans)
assert.Equal(t, "memcache", event["type"])
assert.Equal(t, common.CLIENT_ERROR_STATUS, event["status"])
}
| Test_TryMergeUnmergeableResponseWithStat |
cloud_lib_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cloud_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
import requests
from official.utils.logs import cloud_lib
class CloudLibTest(unittest.TestCase):
@mock.patch("requests.get")
def test_on_gcp(self, mock_requests_get):
mock_response = mock.MagicMock()
mock_requests_get.return_value = mock_response
mock_response.status_code = 200
self.assertEqual(cloud_lib.on_gcp(), True)
@mock.patch("requests.get")
def test_not_on_gcp(self, mock_requests_get):
mock_requests_get.side_effect = requests.exceptions.ConnectionError()
self.assertEqual(cloud_lib.on_gcp(), False)
if __name__ == "__main__":
| unittest.main() |
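# For context, a minimal sketch of the function under test. This is an
# assumption about cloud_lib.on_gcp, not the official/utils source: it probes
# the GCE metadata server and treats a successful response as "on GCP".
import requests

GCP_METADATA_URL = "http://metadata.google.internal"  # hypothetical endpoint constant

def on_gcp_sketch(timeout=1):
    try:
        response = requests.get(
            GCP_METADATA_URL,
            headers={"Metadata-Flavor": "Google"},  # required by the metadata server
            timeout=timeout,
        )
        return response.status_code == 200
    except requests.exceptions.ConnectionError:
        # No route to the metadata server means we are not on GCP.
        return False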
|
train_a2c_mc.py | import torch
import torch.optim as optm
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from data.graph import Graph
from collections import namedtuple
SavedAction = namedtuple('SavedAction', ['log_prob', 'value_current'])
# Monte Carlo methods
class TrainModel_MC:
def __init__(self, model, train_dataset, val_dataset, max_grad_norm=2, use_cuda=False):
self.model = model
self.train_dataset = train_dataset
self.val_dataset = val_dataset
self.max_grad_norm = max_grad_norm
self.use_cuda = use_cuda
self.train_loader = DataLoader(train_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
self.val_loader = DataLoader(val_dataset, shuffle=True, num_workers=1, batch_size=1, collate_fn=lambda x: x)
self.epochs = 0
self.beta = 0.9
self.eps = np.finfo(np.float32).eps.item()
def train_and_validate(self, n_epochs, lr_actor, lr_critic, gamma=0.99, use_critic=True):
| self.actor_optim = optm.Adam(self.model.actor.parameters(), lr=lr_actor)
print(use_critic)
if use_critic:
self.critic_optim = optm.Adam(self.model.critic.parameters(), lr=lr_critic)
self.critic_loss_criterion = torch.nn.MSELoss()
else:
baseline = torch.zeros(1)
if self.use_cuda:
baseline = baseline.cuda()
for epoch in range(1):
n_graphs_proceed = 0
for X in self.train_loader:
for x in X:
self.model.train()
ratio_gcn2mind = []
ratio_gcn2rand = []
for epoch in range(n_epochs):
rewards_mindegree = 0 # number of added edges
rewards_random = 0
x_mind = Graph(x.M)
x_rand = Graph(x.M)
x_rl = Graph(x.M)
# loop for training while eliminating a graph iteratively
for i in range(x.n - 2):
# baseline1: compute return of min degree
if i % 100 == 0:
print('iterations {}'.format(i))
node_mind, d_min = x_mind.min_degree(x_mind.M)
rewards_mindegree += x_mind.eliminate_node(node_mind, reduce=True)
# baseline2: compute return of random
rewards_random += x_rand.eliminate_node(np.random.randint(low=0, high=x_rand.n), reduce=True)
# call actor-critic model
action, log_prob, reward, value_current, value_next, x_rl = self.model(x_rl) # forward pass; action: selected node, reward: number of edges added
self.model.rewards.append(reward)
self.model.actions.append(action)
self.model.saved_actions.append(SavedAction(log_prob, value_current))
R = 0
actor_losses = []
critic_losses = []
returns = []
# compute sampled return for each step
for r in self.model.rewards[::-1]:
R = r + gamma * R
returns.insert(0, R)
returns = torch.tensor(returns)
returns = (returns - returns.mean()) / (returns.std() + self.eps)
saved_actions = self.model.saved_actions
# compute the cumulative actor and critic losses for one graph
for (log_prob, value_current), R in zip(saved_actions, returns):
if use_critic:
advantage = R - value_current
critic_losses.append(-value_current * advantage)
# critic_losses.append(self.critic_loss_criterion(value_current, torch.Tensor([R.detach()])))
else:
advantage = R - baseline
actor_losses.append(log_prob * advantage.detach()) # the return is the discounted number of added edges,
# so minimizing this term drives the policy toward fewer added edges
# step update of actor
self.actor_optim.zero_grad()
actor_loss = torch.stack(actor_losses).sum()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# step update of critic
if use_critic:
self.critic_optim.zero_grad()
critic_loss = torch.stack(critic_losses).sum()
critic_loss.backward()
self.critic_optim.step()
else:
baseline = baseline.detach()
rewards_gcn = sum(self.model.rewards)
_ratio_gcn2mind = rewards_gcn / rewards_mindegree
_ratio_gcn2rand = rewards_gcn / rewards_random
print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
'gcn2mind ratio {}'.format(_ratio_gcn2mind),
'value {}'.format(saved_actions[0].value_current),
'R {}'.format(returns[0]))
print('graph {:04d}'.format(n_graphs_proceed), 'epoch {:04d}'.format(epoch),
'gcn2rand ratio {}'.format(_ratio_gcn2rand))
ratio_gcn2mind.append(_ratio_gcn2mind)
ratio_gcn2rand.append(_ratio_gcn2rand)
del self.model.rewards[:]
del self.model.actions[:]
del self.model.saved_actions[:]
ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)
ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)
min_ratio_gcn2mind = np.min(ratio_gcn2mind)
max_ratio_gcn2mind = np.max(ratio_gcn2mind)
av_ratio_gcn2mind = np.sum(ratio_gcn2mind) / n_epochs
min_ratio_gcn2rand = np.min(ratio_gcn2rand)
max_ratio_gcn2rand = np.max(ratio_gcn2rand)
av_ratio_gcn2rand = np.sum(ratio_gcn2rand) / n_epochs
print('graph {:04d}'.format(n_graphs_proceed), 'gcn2mind',
'min_ratio {}'.format(min_ratio_gcn2mind),
'max_ratio {}'.format(max_ratio_gcn2mind),
'av_ratio {}'.format(av_ratio_gcn2mind))
print('graph {:04d}'.format(n_graphs_proceed), 'gcn2rand',
'min_ratio {}'.format(min_ratio_gcn2rand),
'max_ratio {}'.format(max_ratio_gcn2rand),
'av_ratio {}'.format(av_ratio_gcn2rand),
'nb graph proceeded {}'.format(n_graphs_proceed))
n_graphs_proceed += len(X)
# ratio_gcn2mind = np.array(ratio_gcn2mind).reshape(-1)
# ratio_gcn2rand = np.array(ratio_gcn2rand).reshape(-1)
#
# total_ratio_gcn2mind = np.sum(ratio_gcn2mind)
# total_ratio_gcn2rand = np.sum(ratio_gcn2rand)
#
# min_ratio_gcn2mind = np.min(ratio_gcn2mind)
# max_ratio_gcn2mind = np.max(ratio_gcn2mind)
# av_ratio_gcn2mind = total_ratio_gcn2mind / n_graphs_proceed
#
# min_ratio_gcn2rand = np.min(ratio_gcn2rand)
# max_ratio_gcn2rand = np.max(ratio_gcn2rand)
# av_ratio_gcn2rand = total_ratio_gcn2rand / n_graphs_proceed
#
# print('epoch {:04d}'.format(epoch), 'gcn2mind{:04d}',
# 'min_ratio {}'.format(min_ratio_gcn2mind),
# 'max_ratio {}'.format(max_ratio_gcn2mind),
# 'av_ratio {}'.format(av_ratio_gcn2mind))
# print('epoch {:04d}'.format(epoch), 'gcn2rand{:04d}',
# 'min_ratio {}'.format(min_ratio_gcn2rand),
# 'max_ratio {}'.format(max_ratio_gcn2rand),
# 'av_ratio {}'.format(av_ratio_gcn2rand),
# 'nb graph proceeded {}'.format(n_graphs_proceed)) |
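# A standalone sketch of the discounted-return computation used in the loop
# above (gamma and the reward list here are illustrative placeholders): walk
# the rewards backwards, accumulate R = r + gamma * R, then normalize the
# returns for variance reduction.
import torch

def discounted_returns(rewards, gamma=0.99, eps=1e-8):
    R = 0.0
    returns = []
    for r in reversed(rewards):
        R = r + gamma * R
        returns.insert(0, R)  # prepend so returns align with time steps
    returns = torch.tensor(returns)
    return (returns - returns.mean()) / (returns.std() + eps)

# Example: three elimination steps adding 2, 0 and 1 edges respectively.
print(discounted_returns([2.0, 0.0, 1.0]))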
|
ResetUserPasswordCommand.ts | // smithy-typescript generated code
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
MiddlewareStack,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
import { FinspaceDataClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FinspaceDataClient";
import { ResetUserPasswordRequest, ResetUserPasswordResponse } from "../models/models_0";
import {
deserializeAws_restJson1ResetUserPasswordCommand,
serializeAws_restJson1ResetUserPasswordCommand,
} from "../protocols/Aws_restJson1";
export interface ResetUserPasswordCommandInput extends ResetUserPasswordRequest {}
export interface ResetUserPasswordCommandOutput extends ResetUserPasswordResponse, __MetadataBearer {}
/**
* <p>Resets the password for a specified user ID and generates a temporary one. Only a superuser can reset password for other users. Resetting the password immediately invalidates the previous password associated with the user.</p>
* @example
* Use a bare-bones client and the command you need to make an API call.
* ```javascript
* import { FinspaceDataClient, ResetUserPasswordCommand } from "@aws-sdk/client-finspace-data"; // ES Modules import
* // const { FinspaceDataClient, ResetUserPasswordCommand } = require("@aws-sdk/client-finspace-data"); // CommonJS import
* const client = new FinspaceDataClient(config);
* const command = new ResetUserPasswordCommand(input);
* const response = await client.send(command);
* ```
*
* @see {@link ResetUserPasswordCommandInput} for command's `input` shape.
* @see {@link ResetUserPasswordCommandOutput} for command's `response` shape.
* @see {@link FinspaceDataClientResolvedConfig | config} for FinspaceDataClient's `config` shape.
*
*/
export class ResetUserPasswordCommand extends $Command<
ResetUserPasswordCommandInput,
ResetUserPasswordCommandOutput,
FinspaceDataClientResolvedConfig
> { | // End section: command_properties
constructor(readonly input: ResetUserPasswordCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: FinspaceDataClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<ResetUserPasswordCommandInput, ResetUserPasswordCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "FinspaceDataClient";
const commandName = "ResetUserPasswordCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: ResetUserPasswordRequest.filterSensitiveLog,
outputFilterSensitiveLog: ResetUserPasswordResponse.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: ResetUserPasswordCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_restJson1ResetUserPasswordCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<ResetUserPasswordCommandOutput> {
return deserializeAws_restJson1ResetUserPasswordCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
} | // Start section: command_properties |
request.go | package httpserverutils
import (
"github.com/gorilla/mux"
"github.com/pkg/errors"
"io/ioutil"
"net/http"
)
// HandlerFunc is a handler function that is passed to the
// MakeHandler wrapper and gets the relevant request fields
// from it.
type HandlerFunc func(ctx *ServerContext, r *http.Request, routeParams map[string]string, queryParams map[string]string, requestBody []byte) (
interface{}, error)
// MakeHandler is a wrapper function that takes a handler in the form of HandlerFunc
// and returns a function that can be used as a handler in mux.Router.HandleFunc.
func MakeHandler(handler HandlerFunc) func(http.ResponseWriter, *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
ctx := ToServerContext(r.Context())
var requestBody []byte
if r.Method == "POST" {
var err error
requestBody, err = ioutil.ReadAll(r.Body)
if err != nil {
SendErr(ctx, w, errors.New("Error reading POST data"))
return
}
}
flattenedQueryParams, err := flattenQueryParams(r.URL.Query())
if err != nil {
SendErr(ctx, w, err)
return
}
response, err := handler(ctx, r, mux.Vars(r), flattenedQueryParams, requestBody)
if err != nil {
SendErr(ctx, w, err)
return
}
if response != nil {
SendJSONResponse(w, response)
}
}
}
func | (queryParams map[string][]string) (map[string]string, error) {
flattenedMap := make(map[string]string)
for param, valuesSlice := range queryParams {
if len(valuesSlice) > 1 {
return nil, NewHandlerError(http.StatusUnprocessableEntity, errors.Errorf("Couldn't parse the '%s' query parameter:"+
" expected a single value but got multiple values", param))
}
flattenedMap[param] = valuesSlice[0]
}
return flattenedMap, nil
}
| flattenQueryParams |
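// Hypothetical usage sketch for MakeHandler (handler and route names are
// illustrative, not part of this package): a HandlerFunc that echoes a route
// parameter as JSON, mounted on a mux router.
func helloHandler(ctx *ServerContext, r *http.Request, routeParams map[string]string,
	queryParams map[string]string, requestBody []byte) (interface{}, error) {
	// The returned value is serialized by SendJSONResponse inside MakeHandler.
	return map[string]string{"hello": routeParams["name"]}, nil
}

func registerRoutes(router *mux.Router) {
	router.HandleFunc("/hello/{name}", MakeHandler(helloHandler)).Methods("GET")
}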
globalnetworkpolicy.go | // Copyright (c) 2021 Tigera, Inc. All rights reserved.
// Code generated by lister-gen. DO NOT EDIT.
package v3
import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
v3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
)
// GlobalNetworkPolicyLister helps list GlobalNetworkPolicies.
// All objects returned here must be treated as read-only.
type GlobalNetworkPolicyLister interface {
// List lists all GlobalNetworkPolicies in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v3.GlobalNetworkPolicy, err error)
// Get retrieves the GlobalNetworkPolicy from the index for a given name.
// Objects returned here must be treated as read-only.
Get(name string) (*v3.GlobalNetworkPolicy, error)
GlobalNetworkPolicyListerExpansion
}
// globalNetworkPolicyLister implements the GlobalNetworkPolicyLister interface.
type globalNetworkPolicyLister struct {
indexer cache.Indexer
}
// NewGlobalNetworkPolicyLister returns a new GlobalNetworkPolicyLister.
func NewGlobalNetworkPolicyLister(indexer cache.Indexer) GlobalNetworkPolicyLister |
// List lists all GlobalNetworkPolicies in the indexer.
func (s *globalNetworkPolicyLister) List(selector labels.Selector) (ret []*v3.GlobalNetworkPolicy, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v3.GlobalNetworkPolicy))
})
return ret, err
}
// Get retrieves the GlobalNetworkPolicy from the index for a given name.
func (s *globalNetworkPolicyLister) Get(name string) (*v3.GlobalNetworkPolicy, error) {
obj, exists, err := s.indexer.GetByKey(name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v3.Resource("globalnetworkpolicy"), name)
}
return obj.(*v3.GlobalNetworkPolicy), nil
}
| {
return &globalNetworkPolicyLister{indexer: indexer}
} |
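// Illustrative sketch (not generated code): building the lister over a plain
// client-go indexer and listing everything. In practice the indexer usually
// comes from a shared informer's GetIndexer().
func exampleListPolicies() ([]*v3.GlobalNetworkPolicy, error) {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	lister := NewGlobalNetworkPolicyLister(indexer)
	// labels.Everything() matches every object held by the indexer.
	return lister.List(labels.Everything())
}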
github.rs | use super::{ParsedSegments, Parser};
use crate::{ParseError, Provider};
use std::str;
use url::Url;
#[derive(Debug, Eq, PartialEq)]
pub struct GitHubParser {}
impl Parser for GitHubParser {
fn provider(&self) -> Provider {
Provider::GitHub
}
fn | (&self, scheme: &str) -> bool {
matches!(
scheme,
"git" | "http" | "git+ssh" | "git+https" | "ssh" | "https"
)
}
fn extract<'a>(&self, url: &'a Url) -> Result<ParsedSegments<'a>, ParseError> {
// let [, user, project, type, committish] = url.pathname.split('/', 5)
let mut path_segments = url.path().splitn(5, '/');
let _ = path_segments.next();
let user = path_segments.next();
let project = path_segments.next();
let type_ = path_segments.next();
let mut committish = path_segments.next();
// if (type && type !== 'tree') {
// return
// }
//
// if (!type) {
// committish = url.hash.slice(1)
// }
if let Some(type_) = type_ {
if type_ != "tree" {
return Err(ParseError::UnknownUrl);
}
} else {
committish = url.fragment();
}
// if (project && project.endsWith('.git')) {
// project = project.slice(0, -4)
// }
let project = project.map(|project| project.strip_suffix(".git").unwrap_or(project));
// if (!user || !project) {
// return
// }
if user.is_none() || project.is_none() {
return Err(ParseError::UnknownUrl);
}
// return { user, project, committish }
Ok(ParsedSegments {
user,
project,
committish,
})
}
}
| supports_scheme |
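// Illustrative test sketch (assumes ParseError implements Debug and that the
// ParsedSegments fields are visible within the crate): feed a GitHub URL
// through the parser and inspect the extracted segments.
#[cfg(test)]
mod extract_sketch {
    use super::*;

    #[test]
    fn extracts_user_and_project() {
        let parser = GitHubParser {};
        let url = Url::parse("https://github.com/rust-lang/rust.git").unwrap();
        let segments = parser.extract(&url).unwrap();
        assert_eq!(segments.user, Some("rust-lang"));
        assert_eq!(segments.project, Some("rust")); // ".git" suffix stripped
        assert_eq!(segments.committish, None);
    }
}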
calendar_ui.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
###########################################################
# KivyCalendar (X11/MIT License)
# Calendar & Date picker widgets for Kivy (http://kivy.org)
# https://bitbucket.org/xxblx/kivycalendar
#
# Oleg Kozlov (xxblx), 2015
# https://xxblx.bitbucket.org/
###########################################################
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.popup import Popup
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from kivy.core.window import Window
from kivy.factory import Factory
from kivy.properties import NumericProperty, ReferenceListProperty
from kivyblocks.i18n import I18n
from . import calendar_data as cal_data
###########################################################
Builder.load_string("""
<ArrowButton>:
background_normal: ""
background_down: ""
background_color: 1, 1, 1, 0
size_hint: .1, .1
<MonthYearLabel>:
pos_hint: {"top": 1, "center_x": .5}
size_hint: None, 0.1
halign: "center"
<MonthsManager>:
pos_hint: {"top": .9}
size_hint: 1, .9
<ButtonsGrid>:
cols: 7
rows: 7
size_hint: 1, 1
pos_hint: {"top": 1}
<DayAbbrLabel>:
text_size: self.size[0], None
halign: "center"
<DayAbbrWeekendLabel>:
color: 1, 0, 0, 1
<DayButton>:
group: "day_num"
<DayNumWeekendButton>:
background_color: 1, 0, 0, 1
""")
###########################################################
class DatePicker(TextInput):
"""
Date picker is a textinput; when it gains focus it shows a popup with a
calendar. The popup dimensions can be defined through the pHint property,
for example in kv:
DatePicker:
pHint: 0.7,0.4
would result in a size_hint of 0.7,0.4 being used to create the popup
"""
def __init__(self, touch_switch=False, value=None, *args, **kwargs):
super(DatePicker, self).__init__(*args, **kwargs)
self.touch_switch = touch_switch
self.init_ui(value)
def getValue(self):
return self.text
def setValue(self, sdate):
self.text = sdate
d = [int(i) for i in sdate.split('-')]
d.reverse()
self.cal.active_date = d
def init_ui(self, value):
if not value:
value = cal_data.today_date()
d = [int(i) for i in value.split('.')]
value = '%04d-%02d-%02d' % (d[2],d[1],d[0])
# Calendar
self.cal = CalendarWidget(as_popup=True,
touch_switch=self.touch_switch)
self.setValue(value)
# Popup
self.popup = Popup(content=self.cal, on_dismiss=self.update_value,
size_hint=(0.7,0.7),
title="")
self.cal.parent_popup = self.popup
self.bind(focus=self.show_popup)
def show_popup(self, inst, val):
"""
Open the popup when the textinput gains focus.
"""
if val:
# Automatically dismiss the keyboard
# that results from the textInput
Window.release_all_keyboards()
self.popup.open()
def update_value(self, inst):
""" Update textinput value on popup close """
d = self.cal.active_date
self.text = "%04d-%02d-%02d" % (d[2],d[1],d[0])
self.focus = False
class CalendarWidget(RelativeLayout):
""" Basic calendar widget """
def __init__(self, as_popup=False, touch_switch=False, *args, **kwargs):
super(CalendarWidget, self).__init__(*args, **kwargs)
self.i18n = I18n()
self.as_popup = as_popup
self.touch_switch = touch_switch
self.prepare_data()
self.init_ui()
def init_ui(self):
self.left_arrow = ArrowButton(text="<", on_press=self.go_prev,
pos_hint={"top": 1, "left": 0})
self.right_arrow = ArrowButton(text=">", on_press=self.go_next,
pos_hint={"top": 1, "right": 1})
self.add_widget(self.left_arrow)
self.add_widget(self.right_arrow)
# Title
self.title_label = MonthYearLabel(text=self.title)
self.add_widget(self.title_label)
# ScreenManager
self.sm = MonthsManager()
self.add_widget(self.sm)
self.create_month_scr(self.quarter[1], toggle_today=True)
def create_month_scr(self, month, toggle_today=False):
""" Screen with calendar for one month """
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = ButtonsGrid()
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
#l = DayAbbrWeekendLabel(text=self.days_abrs[i])
l = Factory.Text(text=self.days_abrs[i], i18n=True)
else: # work days
#l = DayAbbrLabel(text=self.days_abrs[i])
l = Factory.Text(text=self.days_abrs[i], i18n=True)
grid_layout.add_widget(l)
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 5: # weekends
tbtn = DayNumWeekendButton(text=str(day[0]))
else: # work days
tbtn = DayNumButton(text=str(day[0]))
tbtn.bind(on_press=self.get_btn_value)
if toggle_today:
# Press down today's button
if day[0] == self.active_date[0] and day[2] == 1:
tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
tbtn.disabled = True
grid_layout.add_widget(tbtn)
self.sm.add_widget(scr)
def prepare_data(self):
""" Prepare data for showing on widget loading """
# Get days abbrs and month names lists
self.month_names = cal_data.get_month_names()
self.month_names_eng = cal_data.get_month_names_eng()
self.days_abrs = cal_data.get_days_abbrs()
# Today date
self.active_date = cal_data.today_date_list()
# Set title
self.title = "%s - %s" % (self.i18n(self.month_names[self.active_date[1] - 1]),
self.active_date[2])
# Quarter where current month in the self.quarter[1]
self.get_quarter()
def get_quarter(self):
""" Get caledar and months/years nums for quarter """
self.quarter_nums = cal_data.calc_quarter(self.active_date[2],
self.active_date[1])
self.quarter = cal_data.get_quarter(self.active_date[2],
self.active_date[1])
def get_btn_value(self, inst):
""" Get day value from pressed button """
self.active_date[0] = int(inst.text)
| if self.as_popup:
self.parent_popup.dismiss()
def go_prev(self, inst):
""" Go to screen with previous month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[0][1],
self.quarter_nums[0][0]]
# Name of prev screen
n = self.quarter_nums[0][1] - 1
prev_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[0][0])
# If it doesn't exist yet, create it
if not self.sm.has_screen(prev_scr_name):
self.create_month_scr(self.quarter[0])
self.sm.current = prev_scr_name
self.sm.transition.direction = "right"
self.get_quarter()
self.title = "%s - %s" % (self.i18n(self.month_names[self.active_date[1] - 1]),
self.active_date[2])
self.title_label.text = self.title
def go_next(self, inst):
""" Go to screen with next month """
# Change active date
self.active_date = [self.active_date[0], self.quarter_nums[2][1],
self.quarter_nums[2][0]]
# Name of next screen
n = self.quarter_nums[2][1] - 1
next_scr_name = "%s-%s" % (self.month_names_eng[n],
self.quarter_nums[2][0])
# If it doesn't exist yet, create it
if not self.sm.has_screen(next_scr_name):
self.create_month_scr(self.quarter[2])
self.sm.current = next_scr_name
self.sm.transition.direction = "left"
self.get_quarter()
self.title = "%s - %s" % (self.i18n(self.month_names[self.active_date[1] - 1]),
self.active_date[2])
self.title_label.text = self.title
def on_touch_move(self, touch):
""" Switch months pages by touch move """
if self.touch_switch:
# Left - prev
if touch.dpos[0] < -30:
self.go_prev(None)
# Right - next
elif touch.dpos[0] > 30:
self.go_next(None)
class ArrowButton(Button):
pass
class MonthYearLabel(Label):
pass
class MonthsManager(ScreenManager):
pass
class ButtonsGrid(GridLayout):
pass
class DayAbbrLabel(Label):
pass
class DayAbbrWeekendLabel(DayAbbrLabel):
pass
class DayButton(ToggleButton):
pass
class DayNumButton(DayButton):
pass
class DayNumWeekendButton(DayButton):
pass | |
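# Minimal usage sketch (assumes the surrounding package context, since the
# relative imports above require it, and a working Kivy installation):
if __name__ == "__main__":
    from kivy.app import App
    from kivy.uix.boxlayout import BoxLayout

    class DemoApp(App):
        def build(self):
            root = BoxLayout(orientation="vertical")
            self.picker = DatePicker(touch_switch=True)
            root.add_widget(self.picker)
            return root

        def on_stop(self):
            # The picked date is kept as the text of the input, e.g. "2015-03-01".
            print("picked:", self.picker.getValue())

    DemoApp().run()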
FluentTheme.ts | export { FluentTheme } from '@fluentui/theme'; |
||
indicators.py | import os
import sys
from math import pi
import numpy as np
import param
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from tqdm.asyncio import tqdm as _tqdm
from ..layout import Column, Row
from ..models import (
HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator
)
from ..pane.markup import Str
from ..reactive import SyncableData
from ..util import escape, updating
from ..viewable import Viewable
from .base import Widget
RED = "#d9534f"
GREEN = "#5cb85c"
BLUE = "#428bca"
class Indicator(Widget):
"""
Indicator is a baseclass for widgets which indicate some state.
"""
sizing_mode = param.ObjectSelector(default='fixed', objects=[
'fixed', 'stretch_width', 'stretch_height', 'stretch_both',
'scale_width', 'scale_height', 'scale_both', None])
__abstract = True
class BooleanIndicator(Indicator):
value = param.Boolean(default=False, doc="""
Whether the indicator is active or not.""")
__abstract = True
class BooleanStatus(BooleanIndicator):
color = param.ObjectSelector(default='dark', objects=[
'primary', 'secondary', 'success', 'info', 'danger', 'warning',
'light', 'dark'])
height = param.Integer(default=20, doc="""
Height of the circle.""")
width = param.Integer(default=20, doc="""
Width of the circle.""")
value = param.Boolean(default=False, doc="""
Whether the indicator is active or not.""")
_rename = {'color': None}
_source_transforms = {'value': None}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
value = msg.pop('value', None)
if value is None:
return msg
msg['css_classes'] = ['dot-filled', self.color] if value else ['dot']
return msg
class LoadingSpinner(BooleanIndicator):
bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])
color = param.ObjectSelector(default='dark', objects=[
'primary', 'secondary', 'success', 'info', 'danger', 'warning',
'light', 'dark'])
height = param.Integer(default=125, doc="""
Height of the circle.""")
width = param.Integer(default=125, doc="""
Width of the circle.""")
value = param.Boolean(default=False, doc="""
Whether the indicator is active or not.""")
_rename = {'color': None, 'bgcolor': None}
_source_transforms = {'value': None}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
value = msg.pop('value', None)
if value is None:
return msg
color_cls = f'{self.color}-{self.bgcolor}'
msg['css_classes'] = ['loader', 'spin', color_cls] if value else ['loader', self.bgcolor]
return msg
class ValueIndicator(Indicator):
"""
A ValueIndicator provides a visual representation for a numeric
value.
"""
value = param.Number(default=None, allow_None=True)
__abstract = True
class Progress(ValueIndicator):
active = param.Boolean(default=True, doc="""
If no value is set the active property toggles animation of the
progress bar on and off.""")
bar_color = param.ObjectSelector(default='success', objects=[
'primary', 'secondary', 'success', 'info', 'danger', 'warning',
'light', 'dark'])
max = param.Integer(default=100, doc="The maximum value of the progress bar.")
value = param.Integer(default=None, bounds=(-1, None), doc="""
The current value of the progress bar. If set to None the progress
bar will be indeterminate and animate depending on the active
parameter. If set to -1 the progress bar will be empty.""")
_rename = {'name': None}
_widget_type = _BkProgress
@param.depends('max', watch=True)
def _update_value_bounds(self):
self.param.value.bounds = (-1, self.max)
def __init__(self,**params):
super().__init__(**params)
self._update_value_bounds()
class Number(ValueIndicator):
"""
The Number indicator renders the value as text optionally colored
according to the color thresholds.
"""
default_color = param.String(default='black')
colors = param.List(default=None)
format = param.String(default='{value}')
font_size = param.String(default='54pt')
nan_format = param.String(default='-', doc="""
How to format nan values.""")
title_size = param.String(default='18pt')
_rename = {}
_source_transforms = {
'value': None, 'colors': None, 'default_color': None,
'font_size': None, 'format': None, 'nan_format': None,
'title_size': None
}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
font_size = msg.pop('font_size', self.font_size)
title_font_size = msg.pop('title_size', self.title_size)
name = msg.pop('name', self.name)
format = msg.pop('format', self.format)
value = msg.pop('value', self.value)
nan_format = msg.pop('nan_format', self.nan_format)
color = msg.pop('default_color', self.default_color)
colors = msg.pop('colors', self.colors)
for val, clr in (colors or [])[::-1]:
if value is not None and value <= val:
color = clr
if value is None:
value = float('nan')
value = format.format(value=value).replace('nan', nan_format)
text = f'<div style="font-size: {font_size}; color: {color}">{value}</div>'
if self.name:
title_font_size = msg.pop('title_size', self.title_size)
text = f'<div style="font-size: {title_font_size}; color: {color}">{name}</div>\n{text}'
msg['text'] = escape(text)
return msg
class String(ValueIndicator):
"""
The String indicator renders a string with a title.
"""
default_color = param.String(default='black')
font_size = param.String(default='54pt')
title_size = param.String(default='18pt')
value = param.String(default=None, allow_None=True)
_rename = {}
_source_transforms = {
'value': None, 'default_color': None, 'font_size': None, 'title_size': None
}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
font_size = msg.pop('font_size', self.font_size)
title_font_size = msg.pop('title_size', self.title_size)
name = msg.pop('name', self.name)
value = msg.pop('value', self.value)
color = msg.pop('default_color', self.default_color)
text = f'<div style="font-size: {font_size}; color: {color}">{value}</div>'
if self.name:
title_font_size = msg.pop('title_size', self.title_size)
text = f'<div style="font-size: {title_font_size}; color: {color}">{name}</div>\n{text}'
msg['text'] = escape(text)
return msg
class Gauge(ValueIndicator):
"""
A Gauge represents a value in some range as a position on
speedometer or gauge. It is similar to a Dial but visually a lot
busier.
"""
annulus_width = param.Integer(default=10, doc="""
Width of the gauge annulus.""")
bounds = param.Range(default=(0, 100), doc="""
The upper and lower bound of the dial.""")
colors = param.List(default=None, doc="""
Color thresholds for the Gauge, specified as a list of tuples
of the fractional threshold and the color to switch to.""")
custom_opts = param.Dict(doc="""
Additional options to pass to the ECharts Gauge definition.""")
height = param.Integer(default=300, bounds=(0, None))
end_angle = param.Number(default=-45, doc="""
Angle at which the gauge ends.""")
format = param.String(default='{value}%', doc="""
Formatting string for the value indicator.""")
num_splits = param.Integer(default=10, doc="""
Number of splits along the gauge.""")
show_ticks = param.Boolean(default=True, doc="""
Whether to show ticks along the dials.""")
show_labels = param.Boolean(default=True, doc="""
Whether to show tick labels along the dials.""")
start_angle = param.Number(default=225, doc="""
Angle at which the gauge starts.""")
tooltip_format = param.String(default='{b} : {c}%', doc="""
Formatting string for the hover tooltip.""")
title_size = param.Integer(default=18, doc="""
Size of title font.""")
value = param.Number(default=25, doc="""
Value to indicate on the gauge a value within the declared bounds.""")
width = param.Integer(default=300, bounds=(0, None))
_rename = {}
_source_transforms = {
'annulus_width': None, 'bounds': None, 'colors': None,
'custom_opts': None, 'end_angle': None, 'format': None,
'num_splits': None, 'show_ticks': None, 'show_labels': None,
'start_angle': None, 'tooltip_format': None, 'title_size': None,
'value': None
}
@property
def _widget_type(self):
if 'panel.models.echarts' not in sys.modules:
from ..models.echarts import ECharts
else:
ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')
return ECharts
def __init__(self, **params):
super().__init__(**params)
self._update_value_bounds()
@param.depends('bounds', watch=True)
def _update_value_bounds(self):
self.param.value.bounds = self.bounds
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
vmin, vmax = msg.pop('bounds', self.bounds)
msg['data'] = {
'tooltip': {
'formatter': msg.pop('tooltip_format', self.tooltip_format)
},
'series': [{
'name': 'Gauge',
'type': 'gauge',
'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},
'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},
'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},
'splitLine': {'show': True},
'radius': '100%',
'detail': {'formatter': msg.pop('format', self.format)},
'min': vmin,
'max': vmax,
'startAngle': msg.pop('start_angle', self.start_angle),
'endAngle': msg.pop('end_angle', self.end_angle),
'splitNumber': msg.pop('num_splits', self.num_splits),
'data': [{'value': msg.pop('value', self.value), 'name': self.name}],
'axisLine': {
'lineStyle': {
'width': msg.pop('annulus_width', self.annulus_width),
}
}
}]
}
colors = msg.pop('colors', self.colors)
if colors:
msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors
custom_opts = msg.pop('custom_opts', self.custom_opts)
if custom_opts:
gauge = msg['data']['series'][0]
for k, v in custom_opts.items():
if k not in gauge or not isinstance(gauge[k], dict):
gauge[k] = v
else:
gauge[k].update(v)
return msg
class Dial(ValueIndicator):
"""
A Dial represents a value in some range as a position on an
annular dial. It is similar to a Gauge but more minimal visually.
"""
annulus_width = param.Number(default=0.2, doc="""
Width of the radial annulus as a fraction of the total.""")
bounds = param.Range(default=(0, 100), doc="""
The upper and lower bound of the dial.""")
colors = param.List(default=None, doc="""
Color thresholds for the Dial, specified as a list of tuples
of the fractional threshold and the color to switch to.""")
default_color = param.String(default='lightblue', doc="""
Color of the radial annulus if not color thresholds are supplied.""")
end_angle = param.Number(default=25, doc="""
Angle at which the dial ends.""")
format = param.String(default='{value}%', doc="""
Formatting string for the value indicator and lower/upper bounds.""")
height = param.Integer(default=250, bounds=(1, None))
nan_format = param.String(default='-', doc="""
How to format nan values.""")
needle_color = param.String(default='black', doc="""
Color of the Dial needle.""")
needle_width = param.Number(default=0.1, doc="""
Radial width of the needle.""")
start_angle = param.Number(default=-205, doc="""
Angle at which the dial starts.""")
tick_size = param.String(default=None, doc="""
Font size of the Dial min/max labels.""")
title_size = param.String(default=None, doc="""
Font size of the Dial title.""")
unfilled_color = param.String(default='whitesmoke', doc="""
Color of the unfilled region of the Dial.""")
value_size = param.String(default=None, doc="""
Font size of the Dial value label.""")
value = param.Number(default=25, allow_None=True, doc="""
Value to indicate on the dial a value within the declared bounds.""")
width = param.Integer(default=250, bounds=(1, None))
_manual_params = [
'value', 'start_angle', 'end_angle', 'bounds',
'annulus_width', 'format', 'background', 'needle_width',
'tick_size', 'title_size', 'value_size', 'colors',
'default_color', 'unfilled_color', 'height',
'width', 'nan_format', 'needle_color'
]
_data_params = _manual_params
_rename = {'background': 'background_fill_color'}
def __init__(self, **params):
super().__init__(**params)
self._update_value_bounds()
@param.depends('bounds', watch=True)
def _update_value_bounds(self):
self.param.value.bounds = self.bounds
def _get_data(self):
vmin, vmax = self.bounds
value = self.value
if value is None:
value = float('nan')
fraction = (value-vmin)/(vmax-vmin)
start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi
end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi
distance = (abs(end-start) % (pi*2))
if end>start:
distance = (pi*2)-distance
radial_fraction = distance*fraction
angle = start if np.isnan(fraction) else (start-radial_fraction)
inner_radius = 1-self.annulus_width
color = self.default_color
for val, clr in (self.colors or [])[::-1]:
if fraction <= val:
color = clr
annulus_data = {
'starts': np.array([start, angle]),
'ends' : np.array([angle, end]),
'color': [color, self.unfilled_color],
'radius': np.array([inner_radius, inner_radius])
}
x0s, y0s, x1s, y1s, clrs = [], [], [], [], []
colors = self.colors or []
for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):
tangle = start-(distance*val)
if (vmin + val * (vmax-vmin)) <= value:
continue
x0, y0 = np.cos(tangle), np.sin(tangle)
x1, y1 = x0*inner_radius, y0*inner_radius | x0s.append(x0)
y0s.append(y0)
x1s.append(x1)
y1s.append(y1)
clrs.append(clr)
threshold_data = {
'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs
}
center_radius = 1-self.annulus_width/2.
x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius
needle_start = pi+angle-(self.needle_width/2.)
needle_end = pi+angle+(self.needle_width/2.)
needle_data = {
'x': np.array([x]),
'y': np.array([y]),
'start': np.array([needle_start]),
'end': np.array([needle_end]),
'radius': np.array([center_radius])
}
value = self.format.format(value=value).replace('nan', self.nan_format)
min_value = self.format.format(value=vmin)
max_value = self.format.format(value=vmax)
tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius
tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius
tmin_angle, tmax_angle = start+pi, end+pi % pi
scale = (self.height/400)
title_size = self.title_size if self.title_size else '%spt' % (scale*32)
value_size = self.value_size if self.value_size else '%spt' % (scale*48)
tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)
text_data= {
'x': np.array([0, 0, tminx, tmaxx]),
'y': np.array([-.2, -.5, tminy, tmaxy]),
'text': [self.name, value, min_value, max_value],
'rot': np.array([0, 0, tmin_angle, tmax_angle]),
'size': [title_size, value_size, tick_size, tick_size],
'color': ['black', color, 'black', 'black']
}
return annulus_data, needle_data, threshold_data, text_data
def _get_model(self, doc, root=None, parent=None, comm=None):
params = self._process_param_change(self._init_params())
model = figure(
x_range=(-1,1), y_range=(-1,1), tools=[],
outline_line_color=None, toolbar_location=None,
width=self.width, height=self.height, **params
)
model.xaxis.visible = False
model.yaxis.visible = False
model.grid.visible = False
annulus, needle, threshold, text = self._get_data()
# Draw annulus
annulus_source = ColumnDataSource(data=annulus, name='annulus_source')
model.annular_wedge(
x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',
end_angle='ends', line_color='gray', color='color', direction='clock',
source=annulus_source
)
# Draw needle
needle_source = ColumnDataSource(data=needle, name='needle_source')
model.wedge(
x='x', y='y', radius='radius', start_angle='start', end_angle='end',
fill_color=self.needle_color, line_color=self.needle_color,
source=needle_source, name='needle_renderer'
)
# Draw thresholds
threshold_source = ColumnDataSource(data=threshold, name='threshold_source')
model.segment(
x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,
line_width=2
)
# Draw labels
text_source = ColumnDataSource(data=text, name='label_source')
model.text(
x='x', y='y', text='text', font_size='size', text_align='center',
text_color='color', source=text_source, text_baseline='top',
angle='rot'
)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
def _manual_update(self, events, model, doc, root, parent, comm):
update_data = False
for event in events:
if event.name in ('width', 'height'):
model.update(**{event.name: event.new})
if event.name in self._data_params:
update_data = True
elif event.name == 'needle_color':
needle_r = model.select(name='needle_renderer')
needle_r.glyph.line_color = event.new
needle_r.glyph.fill_color = event.new
if not update_data:
return
annulus, needle, threshold, labels = self._get_data()
model.select(name='annulus_source').data.update(annulus)
model.select(name='needle_source').data.update(needle)
model.select(name='threshold_source').data.update(threshold)
model.select(name='label_source').data.update(labels)
class Trend(SyncableData, Indicator):
"""
The Trend indicator enables the user to display a Dashboard KPI Card.
The card can be laid out as:
* a column (text and plot on top of each other) or
* a row (text and plot after each other)
The text section is responsive and resizes on window resize.
"""
data = param.Parameter(doc="""
The plot data declared as a dictionary of arrays or a DataFrame.""")
layout = param.ObjectSelector(default="column", objects=["column", "row"])
plot_x = param.String(default="x", doc="""
The name of the key in the plot_data to use on the x-axis.""")
plot_y = param.String(default="y", doc="""
The name of the key in the plot_data to use on the y-axis.""")
plot_color = param.String(default=BLUE, doc="""
The color to use in the plot.""")
plot_type = param.ObjectSelector(default="bar", objects=["line", "step", "area", "bar"], doc="""
The plot type to render the plot data as.""")
pos_color = param.String(GREEN, doc="""
The color used to indicate a positive change.""")
neg_color = param.String(RED, doc="""
The color used to indicate a negative change.""")
title = param.String(doc="""The title or a short description of the card""")
value = param.Parameter(default='auto', doc="""
The primary value to be displayed.""")
value_change = param.Parameter(default='auto', doc="""
A secondary value. For example the change in percent.""")
_data_params = ['data']
_manual_params = ['data']
_rename = {'data': None, 'selection': None}
_widget_type = _BkTrendIndicator
def _get_data(self):
if self.data is None:
return None, {self.plot_x: [], self.plot_y: []}
elif isinstance(self.data, dict):
return self.data, self.data
return self.data, ColumnDataSource.from_df(self.data)
def _init_params(self):
props = super()._init_params()
self._processed, self._data = self._get_data()
props['source'] = ColumnDataSource(data=self._data)
return props
def _trigger_auto_values(self):
trigger = []
if self.value == 'auto':
trigger.append('value')
if self.value_change == 'auto':
trigger.append('value_change')
if trigger:
self.param.trigger(*trigger)
@updating
def _stream(self, stream, rollover=None):
self._trigger_auto_values()
super()._stream(stream, rollover)
def _update_cds(self, *events):
super()._update_cds(*events)
self._trigger_auto_values()
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
ys = self._data.get(self.plot_y, [])
if 'value' in msg and msg['value'] == 'auto':
if len(ys):
msg['value'] = ys[-1]
else:
msg['value'] = 0
if 'value_change' in msg and msg['value_change'] == 'auto':
if len(ys) > 1:
y1, y2 = self._data.get(self.plot_y)[-2:]
msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)
else:
msg['value_change'] = 0
return msg
MARGIN = {
"text_pane": {"column": (5, 10, 0, 10), "row": (0, 10, 0, 10)},
"progress": {"column": (0, 10, 5, 10), "row": (12, 10, 0, 10)},
}
class ptqdm(_tqdm):
def __init__(self, *args, **kwargs):
self._indicator = kwargs.pop('indicator')
super().__init__(*args, **kwargs)
def display(self, msg=None, pos=None, bar_style=None):
super().display(msg, pos)
style = self._indicator.text_pane.style or {}
color = self.colour or 'black'
self._indicator.text_pane.style = dict(style, color=color)
if self.total is not None and self.n is not None:
self._indicator.max = int(self.total) # Can be numpy.int64
self._indicator.value = int(self.n)
self._indicator.text = self._to_text(**self.format_dict)
return True
def _to_text(self, n, total, **kwargs):
return self.format_meter(n, total, **{**kwargs, "ncols": 0})
def close(self):
super().close()
if not self.leave:
self._indicator.reset()
return _tqdm
class Tqdm(Indicator):
layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc="""
The layout for the text and progress indicator.""",)
max = param.Integer(default=100, doc="""
The maximum value of the progress indicator.""")
progress = param.ClassSelector(class_=Progress, precedence=-1, doc="""
The Progress indicator used to display the progress.""",)
text = param.String(default='', doc="""
The current tqdm style progress text.""")
text_pane = param.ClassSelector(class_=Str, precedence=-1, doc="""
The pane to display the text to.""")
value = param.Integer(default=0, bounds=(0, None), doc="""
The current value of the progress bar. If set to None the progress
bar will be indeterminate and animate depending on the active
parameter.""")
margin = param.Parameter(default=0, doc="""
Allows to create additional space around the component. May
be specified as a two-tuple of the form (vertical, horizontal)
or a four-tuple (top, right, bottom, left).""")
width = param.Integer(default=400, bounds=(0, None), doc="""
The width of the component (in pixels). This can be either
fixed or preferred width, depending on width sizing policy.""")
write_to_console = param.Boolean(default=False, doc="""
Whether or not to also write to the console.""")
_layouts = {Row: 'row', Column: 'column'}
_rename = {'value': None, 'min': None, 'max': None, 'text': None}
def __init__(self, **params):
layout = params.pop('layout', 'column')
layout = self._layouts.get(layout, layout)
if "text_pane" not in params:
sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'
params["text_pane"] = Str(
None, min_height=20, min_width=280, sizing_mode=sizing_mode,
margin=MARGIN["text_pane"][layout],
)
if "progress" not in params:
params["progress"] = Progress(
active=False,
sizing_mode="stretch_width",
min_width=100,
margin=MARGIN["progress"][layout],
)
layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}
if layout == 'row' or layout is Row:
params['layout'] = Row(
params['progress'], params['text_pane'], **layout_params
)
else:
params['layout'] = Column(
params['text_pane'], params['progress'], **layout_params
)
super().__init__(**params)
self.param.watch(self._update_layout, list(Viewable.param))
if self.value == 0:
# Hack: to give progress the initial look
self.progress.max = 100000
self.progress.value = 1
else:
self.progress.max = self.max
self.progress.value = self.value
self.text_pane.object = self.text
def _get_model(self, doc, root=None, parent=None, comm=None):
model = self.layout._get_model(doc, root, parent, comm)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
def _cleanup(self, root):
super()._cleanup(root)
self.layout._cleanup(root)
def _update_layout(self, *events):
self.layout.param.set_param(**{event.name: event.new for event in events})
@param.depends("text", watch=True)
def _update_text(self):
if self.text_pane:
self.text_pane.object = self.text
@param.depends("value", watch=True)
def _update_value(self):
if self.progress:
self.progress.value = self.value
@param.depends("max", watch=True)
def _update_max(self):
if self.progress:
self.progress.max = self.max
def __call__(self, *args, **kwargs):
kwargs['indicator'] = self
if not self.write_to_console:
f = open(os.devnull, 'w')
kwargs['file'] = f
return ptqdm(*args, **kwargs)
__call__.__doc__ = ptqdm.__doc__
def pandas(self, *args, **kwargs):
kwargs['indicator'] = self
if not self.write_to_console and 'file' not in kwargs:
f = open(os.devnull, 'w')
kwargs['file'] = f
return ptqdm.pandas(*args, **kwargs)
def reset(self):
"""Resets the parameters"""
self.value = self.param.value.default
self.text = self.param.text.default | |
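# ----------------------------------------------------------------------
# Usage sketch (not part of the module; values are illustrative). Assumes
# a Panel session with the ECharts extension loaded for the Gauge:
#
#   import panel as pn
#   pn.extension('echarts')
#
#   number = Number(name='CPU', value=72, format='{value}%',
#                   colors=[(50, 'green'), (75, 'orange'), (100, 'red')])
#   gauge = Gauge(name='Load', value=25, bounds=(0, 100))
#   progress = Tqdm()
#
#   pn.Row(number, gauge, progress.layout).servable()
#
#   for _ in progress(range(10)):
#       pass  # long-running work; the bar and text update as the loop runs
# ----------------------------------------------------------------------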
event_controller.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use crate::PropagationPhase;
use crate::Widget;
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
glib::wrapper! {
pub struct EventController(Object<ffi::GtkEventController, ffi::GtkEventControllerClass>);
match fn {
type_ => || ffi::gtk_event_controller_get_type(),
}
}
pub const NONE_EVENT_CONTROLLER: Option<&EventController> = None;
pub trait EventControllerExt: 'static {
#[doc(alias = "gtk_event_controller_get_propagation_phase")]
#[doc(alias = "get_propagation_phase")]
fn propagation_phase(&self) -> PropagationPhase;
#[doc(alias = "gtk_event_controller_get_widget")]
#[doc(alias = "get_widget")]
fn widget(&self) -> Option<Widget>;
#[doc(alias = "gtk_event_controller_handle_event")]
fn handle_event(&self, event: &gdk::Event) -> bool;
#[doc(alias = "gtk_event_controller_reset")]
fn reset(&self);
#[doc(alias = "gtk_event_controller_set_propagation_phase")]
fn set_propagation_phase(&self, phase: PropagationPhase);
#[doc(alias = "propagation-phase")]
fn connect_propagation_phase_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<EventController>> EventControllerExt for O {
fn propagation_phase(&self) -> PropagationPhase {
unsafe {
from_glib(ffi::gtk_event_controller_get_propagation_phase(
self.as_ref().to_glib_none().0,
))
}
}
fn widget(&self) -> Option<Widget> {
unsafe {
from_glib_none(ffi::gtk_event_controller_get_widget(
self.as_ref().to_glib_none().0,
))
}
}
fn handle_event(&self, event: &gdk::Event) -> bool {
unsafe {
from_glib(ffi::gtk_event_controller_handle_event(
self.as_ref().to_glib_none().0,
event.to_glib_none().0,
))
}
}
fn reset(&self) {
unsafe {
ffi::gtk_event_controller_reset(self.as_ref().to_glib_none().0);
}
}
fn set_propagation_phase(&self, phase: PropagationPhase) {
unsafe {
ffi::gtk_event_controller_set_propagation_phase(
self.as_ref().to_glib_none().0,
phase.into_glib(),
);
}
}
#[doc(alias = "propagation-phase")]
fn connect_propagation_phase_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_propagation_phase_trampoline<P, F: Fn(&P) + 'static>(
this: *mut ffi::GtkEventController,
_param_spec: glib::ffi::gpointer,
f: glib::ffi::gpointer,
) where
P: IsA<EventController>,
|
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::propagation-phase\0".as_ptr() as *const _,
Some(transmute::<_, unsafe extern "C" fn()>(
notify_propagation_phase_trampoline::<Self, F> as *const (),
)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for EventController {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("EventController")
}
}
| {
let f: &F = &*(f as *const F);
f(&EventController::from_glib_borrow(this).unsafe_cast_ref())
} |
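// Illustrative usage sketch (hypothetical caller; exact add_controller
// signature varies by gtk4-rs version): any concrete controller, e.g. a
// gtk::GestureClick, implements EventControllerExt, so the propagation phase
// can be tuned before attaching the controller to a widget:
//
//     let gesture = gtk::GestureClick::new();
//     gesture.set_propagation_phase(gtk::PropagationPhase::Capture);
//     gesture.connect_propagation_phase_notify(|c| {
//         println!("phase changed: {:?}", c.propagation_phase());
//     });
//     widget.add_controller(gesture);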
sha1.go | package ubiquity
import (
"crypto/x509"
"fmt"
"time"
"github.com/redNixon/cfssl/helpers"
)
// DeprecationSeverity encodes the severity of a deprecation policy
type DeprecationSeverity int
const (
// None indicates there is no deprecation
None DeprecationSeverity = iota
// Low indicates the deprecation policy won't affect user experience
Low
// Medium indicates the deprecation policy will affect user experience
// either in a minor way or for a limited scope of users.
Medium
// High indicates the deprecation policy will strongly affect user experience
High
)
// SHA1DeprecationPolicy encodes how a platform deprecates the support of SHA1
type SHA1DeprecationPolicy struct {
// the name of platform
Platform string `json:"platform"`
// policy severity, policies of the same platform will only trigger the one of highest severity
Severity DeprecationSeverity `json:"severity"`
// a human readable message describing the deprecation effects
Description string `json:"description"`
// the date when the policy is effective. zero value means effective immediately
EffectiveDate time.Time `json:"effective_date"`
// the expiry deadline indicates the latest date through which an
// end-entity certificate with SHA1 can remain valid.
ExpiryDeadline time.Time `json:"expiry_deadline"`
// the date beyond which SHA1 cert should not be issued.
NeverIssueAfter time.Time `json:"never_issue_after"`
}
// SHA1DeprecationPolicys is a list of SHA1DeprecationPolicy values
// proposed by major browser vendors
var SHA1DeprecationPolicys = []SHA1DeprecationPolicy{
// Chrome:
// if the leaf certificate expires between 01-01-2016 and 01-01-2017
// and the chain (excluding root) contains SHA-1 cert, show "minor errors".
{
Platform: "Google Chrome",
Description: "shows the SSL connection has minor problems",
Severity: Medium,
ExpiryDeadline: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
},
// Chrome:
// if the leaf certificate expires after Jan. 1st 2017
// and the chain (excluding root) contains SHA-1 cert, show "untrusted SSL".
{
Platform: "Google Chrome",
Description: "shows the SSL connection is untrusted",
Severity: High,
ExpiryDeadline: time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC),
},
// Mozilla Firefox:
// if the leaf certificate expires after Jan. 1st 2017, and
// the chain (excluding root) contains SHA-1 cert, show a warning in the developer console.
{
Platform: "Mozilla Firefox",
Description: "gives warning in the developer console",
Severity: Low,
ExpiryDeadline: time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC),
},
// Mozilla Firefox:
// if a new certificate is issued after Jan. 1st 2016, and
// it is a SHA-1 cert, reject it.
{
Platform: "Mozilla Firefox",
Description: "shows the SSL connection is untrusted",
Severity: Medium,
EffectiveDate: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
NeverIssueAfter: time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC),
},
// Mozilla Firefox:
// deprecate all valid SHA-1 cert chain on Jan. 1st 2017
{
Platform: "Mozilla Firefox",
Description: "shows the SSL connection is untrusted",
Severity: High,
EffectiveDate: time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC),
ExpiryDeadline: time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC),
},
// Microsoft Windows:
// deprecate all valid SHA-1 cert chain on Jan. 1st 2017
{
Platform: "Microsoft Windows Vista and later",
Description: "shows the SSL connection is untrusted",
Severity: High,
EffectiveDate: time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC),
ExpiryDeadline: time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC),
},
}
// Flag returns whether the policy flags the cert chain as deprecated for matching its deprecation criteria
func (p SHA1DeprecationPolicy) Flag(chain []*x509.Certificate) bool {
leaf := chain[0]
if time.Now().After(p.EffectiveDate) {
// Reject newly issued leaf certificate with SHA-1 after the specified deadline.
if !p.NeverIssueAfter.IsZero() && leaf.NotBefore.After(p.NeverIssueAfter) {
// Check hash algorithm of non-root leaf cert.
if len(chain) > 1 && helpers.HashAlgoString(leaf.SignatureAlgorithm) == "SHA1" {
return true
}
}
// Reject certificate chain with SHA-1 that are still valid after expiry deadline.
if !p.ExpiryDeadline.IsZero() && leaf.NotAfter.After(p.ExpiryDeadline) {
// Check hash algorithm of non-root certs.
for i, cert := range chain {
if i < len(chain)-1 {
if helpers.HashAlgoString(cert.SignatureAlgorithm) == "SHA1" {
return true
}
}
}
}
}
return false
}
// SHA1DeprecationMessages returns a list of human-readable messages. Each message describes
// how one platform rejects the chain based on SHA1 deprecation policies.
func | (chain []*x509.Certificate) []string {
// record the most severe deprecation policy by each platform
selectedPolicies := map[string]SHA1DeprecationPolicy{}
for _, policy := range SHA1DeprecationPolicys {
if policy.Flag(chain) {
// only keep the policy with highest severity
if selectedPolicies[policy.Platform].Severity < policy.Severity {
selectedPolicies[policy.Platform] = policy
}
}
}
// build the message list
list := []string{}
for _, policy := range selectedPolicies {
if policy.Severity > None {
list = append(list, fmt.Sprintf("%s %s due to SHA-1 deprecation", policy.Platform, policy.Description))
}
}
return list
}
| SHA1DeprecationMessages |
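// Illustrative caller sketch (hypothetical helper, not part of this package):
// parse a PEM bundle with the cfssl helpers and print any platform-specific
// SHA-1 deprecation warnings for the resulting chain.
func exampleSHA1Report(bundlePEM []byte) {
	chain, err := helpers.ParseCertificatesPEM(bundlePEM)
	if err != nil {
		fmt.Println("failed to parse chain:", err)
		return
	}
	for _, msg := range SHA1DeprecationMessages(chain) {
		fmt.Println(msg)
	}
}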
info.rs | //! Fetch info of all running containers concurrently
use bollard::container::{InspectContainerOptions, ListContainersOptions};
use bollard::models::ContainerSummary;
use bollard::Docker;
use std::collections::HashMap;
use std::default::Default;
use futures_util::stream;
use futures_util::stream::StreamExt;
async fn conc(arg: (Docker, &ContainerSummary)) {
let (docker, container) = arg;
println!(
"{:?}",
docker
.inspect_container(
container.id.as_ref().unwrap(),
None::<InspectContainerOptions>
)
.await
.unwrap()
)
} | let docker = Docker::connect_with_socket_defaults().unwrap();
let mut list_container_filters = HashMap::new();
list_container_filters.insert("status", vec!["running"]);
let containers = &docker
.list_containers(Some(ListContainersOptions {
all: true,
filters: list_container_filters,
..Default::default()
}))
.await?;
let docker_stream = stream::repeat(docker);
docker_stream
.zip(stream::iter(containers))
.for_each_concurrent(2, conc)
.await;
Ok(())
} |
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + 'static>> { |
axios.ts | import axios,{AxiosInstance, AxiosRequestConfig, AxiosResponse} from 'axios';
const instance : AxiosInstance = axios.create({
baseURL: ` http://127.0.0.1:3500`
})
instance.interceptors.request.use((request:AxiosRequestConfig) =>{
request.headers[`My authentication`] = 'AUTH092332'
console.log(request.url)
return request;
});
instance.interceptors.response.use((response: AxiosResponse)=>{
return response;
}, error =>{
return Promise.reject(error); | });
export default instance; |
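// Usage sketch (the endpoint and import path are illustrative assumptions):
// every call made through `instance` passes both interceptors defined above.
import instance from './axios';

instance.get('/todos/1')
  .then(response => console.log(response.data))
  .catch(error => console.error('request failed:', error));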
|
_blob_containers_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations:
"""BlobContainersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2018_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
account_name: str,
**kwargs
) -> "_models.ListContainerItems":
"""Lists all containers and does not support a prefix like data plane. Also SRP today does not
return continuation token.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListContainerItems, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.ListContainerItems
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListContainerItems"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListContainerItems', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'} # type: ignore
async def create(
self,
resource_group_name: str,
account_name: str,
container_name: str,
blob_container: "_models.BlobContainer",
**kwargs
) -> "_models.BlobContainer":
"""Creates a new container under the specified account as described by request body. The container
resource includes metadata and properties for that container. It does not include a list of the
blobs contained by the container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties of the blob container to create.
:type blob_container: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(blob_container, 'BlobContainer')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
container_name: str,
blob_container: "_models.BlobContainer",
**kwargs
) -> "_models.BlobContainer":
"""Updates container properties as specified in request body. Properties not mentioned in the
request will be unchanged. Update fails if the specified container doesn't already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param blob_container: Properties to update for the blob container.
:type blob_container: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(blob_container, 'BlobContainer')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
async def get(
self,
resource_group_name: str,
account_name: str,
container_name: str,
**kwargs
) -> "_models.BlobContainer":
"""Gets properties of a specified container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BlobContainer, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.BlobContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BlobContainer"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BlobContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
account_name: str,
container_name: str,
**kwargs
) -> None:
"""Deletes specified container under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'} # type: ignore
async def set_legal_hold(
self,
resource_group_name: str,
account_name: str,
container_name: str,
legal_hold: "_models.LegalHold",
**kwargs
) -> "_models.LegalHold":
"""Sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold
follows an append pattern and does not clear out the existing tags that are not specified in
the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be set to a blob container.
:type legal_hold: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_legal_hold.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(legal_hold, 'LegalHold')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'} # type: ignore
async def clear_legal_hold(
self,
resource_group_name: str,
account_name: str,
container_name: str,
legal_hold: "_models.LegalHold",
**kwargs
) -> "_models.LegalHold":
"""Clears legal hold tags. Clearing the same or non-existent tag results in an idempotent
operation. ClearLegalHold clears out only the specified tags in the request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param legal_hold: The LegalHold property that will be clear from a blob container.
:type legal_hold: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LegalHold, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.LegalHold
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.clear_legal_hold.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(legal_hold, 'LegalHold')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LegalHold', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'} # type: ignore
async def create_or_update_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: Optional[str] = None,
parameters: Optional["_models.ImmutabilityPolicy"] = None,
**kwargs
) -> "_models.ImmutabilityPolicy":
|
create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
async def get_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: Optional[str] = None,
**kwargs
) -> "_models.ImmutabilityPolicy":
"""Gets the existing immutability policy along with the corresponding ETag in response headers and
body.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
immutability_policy_name = "default"
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.get_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
async def delete_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
**kwargs
) -> "_models.ImmutabilityPolicy":
"""Aborts an unlocked immutability policy. The response of delete has
immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this
operation. Deleting a locked immutability policy is not allowed, only way is to delete the
container after deleting all blobs inside the container.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
immutability_policy_name = "default"
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.delete_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
async def lock_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
**kwargs
) -> "_models.ImmutabilityPolicy":
"""Sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
accept = "application/json"
# Construct URL
url = self.lock_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'} # type: ignore
async def extend_immutability_policy(
self,
resource_group_name: str,
account_name: str,
container_name: str,
if_match: str,
parameters: Optional["_models.ImmutabilityPolicy"] = None,
**kwargs
) -> "_models.ImmutabilityPolicy":
"""Extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
action allowed on a Locked policy will be this action. ETag in If-Match is required for this
operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be extended for a blob
container.
:type parameters: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.extend_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'} # type: ignore
async def lease(
self,
resource_group_name: str,
account_name: str,
container_name: str,
parameters: Optional["_models.LeaseContainerRequest"] = None,
**kwargs
) -> "_models.LeaseContainerResponse":
"""The Lease Container operation establishes and manages a lock on a container for delete
operations. The lock duration can be 15 to 60 seconds, or can be infinite.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param parameters: Lease Container request body.
:type parameters: ~azure.mgmt.storage.v2018_07_01.models.LeaseContainerRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LeaseContainerResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.LeaseContainerResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LeaseContainerResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.lease.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'LeaseContainerRequest')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'} # type: ignore
| """Creates or updates an unlocked immutability policy. ETag in If-Match is honored if given but
not required for this operation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param container_name: The name of the blob container within the specified storage account.
Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
by a letter or number.
:type container_name: str
:param if_match: The entity state (ETag) version of the immutability policy to update. A value
of "*" can be used to apply the operation only if the immutability policy already exists. If
omitted, this operation will always be applied.
:type if_match: str
:param parameters: The ImmutabilityPolicy Properties that will be created or updated to a blob
container.
:type parameters: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ImmutabilityPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2018_07_01.models.ImmutabilityPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
immutability_policy_name = "default"
api_version = "2018-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_immutability_policy.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized |
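These operation-group methods are normally reached through a management client that attaches them as `blob_containers`, as the class docstring notes. A hedged usage sketch with the matching versioned async client (assumes the `azure-mgmt-storage` and `azure-identity` packages; all resource names are placeholders):

```python
import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.storage.v2018_07_01.aio import StorageManagementClient

async def main() -> None:
    credential = DefaultAzureCredential()
    client = StorageManagementClient(credential, "<subscription-id>")
    try:
        # list() returns ListContainerItems; its .value holds the containers.
        result = await client.blob_containers.list("<resource-group>", "<storage-account>")
        for container in result.value or []:
            print(container.name)
    finally:
        await client.close()
        await credential.close()

asyncio.run(main())
```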
index.ts | import { translateRaw } from 'translations';
import { IHexStrWeb3Transaction } from 'libs/transaction';
import { INode } from 'libs/nodes/INode';
import {
isValidSendTransaction,
isValidSignMessage,
isValidGetAccounts,
isValidGetNetVersion
} from 'libs/validators';
import RPCNode from '../rpc';
import Web3Client from './client';
import Web3Requests from './requests';
const METAMASK_PERMISSION_DENIED_ERROR = translateRaw('METAMASK_PERMISSION_DENIED');
export default class Web3Node extends RPCNode {
public client: Web3Client;
public requests: Web3Requests;
constructor() {
super('web3'); // initialized with fake endpoint
this.client = new Web3Client();
this.requests = new Web3Requests();
}
public getNetVersion(): Promise<string> {
return this.client
.call(this.requests.getNetVersion())
.then(isValidGetNetVersion)
.then(({ result }) => result);
}
public sendTransaction(web3Tx: IHexStrWeb3Transaction): Promise<string> {
return this.client
.call(this.requests.sendTransaction(web3Tx))
.then(isValidSendTransaction)
.then(({ result }) => result);
}
public signMessage(msgHex: string, fromAddr: string): Promise<string> {
return this.client
.call(this.requests.signMessage(msgHex, fromAddr))
.then(isValidSignMessage)
.then(({ result }) => result);
}
public getAccounts(): Promise<string[]> {
return this.client
.call(this.requests.getAccounts())
.then(isValidGetAccounts)
.then(({ result }) => result);
}
}
export function isWeb3Node(nodeLib: INode | Web3Node): nodeLib is Web3Node {
return nodeLib instanceof Web3Node;
}
export const Web3Service = 'MetaMask / Mist';
export async function getChainIdAndLib() {
const lib = new Web3Node();
const chainId = await lib.getNetVersion();
const accounts = await lib.getAccounts();
if (!accounts.length) {
throw new Error('No accounts found in MetaMask / Mist.');
}
if (chainId === 'loading') {
throw new Error('MetaMask / Mist is still loading. Please refresh the page and try again.');
}
return { chainId, lib };
}
export async function setupWeb3Node() {
// Handle the following MetaMask breaking change:
// https://medium.com/metamask/https-medium-com-metamask-breaking-change-injecting-web3-7722797916a8
const { ethereum } = window as any;
if (ethereum) {
// Overwrite the legacy Web3 with the newer version.
(window as any).web3 = new (window as any).Web3(ethereum);
try {
// Request permission to access MetaMask accounts.
await ethereum.enable();
// Permission was granted; proceed.
return getChainIdAndLib();
} catch (e) {
// Permission was denied; handle appropriately.
throw new Error(METAMASK_PERMISSION_DENIED_ERROR);
}
} else if ((window as any).web3) {
// Legacy handling; will become unavailable 11/2.
const { web3 } = window as any;
if (!web3 || !web3.currentProvider || !web3.currentProvider.sendAsync) { | throw new Error('Web3 not found. Please check that MetaMask is installed');
}
return getChainIdAndLib();
} else {
throw new Error('Web3 not found. Please check that MetaMask is installed');
}
}
export async function isWeb3NodeAvailable(): Promise<boolean> {
try {
await setupWeb3Node();
return true;
} catch (e) {
// If the specific error is that the request for MetaMask permission was denied,
// re-throw the error and allow the caller to handle it.
if (e.message === METAMASK_PERMISSION_DENIED_ERROR) {
throw e;
}
// Otherwise, chances are the MetaMask extension isn't installed.
return false;
}
}
export async function ensureWeb3NodeStillAvailable(): Promise<boolean> {
try {
const { ethereum } = window as any;
// Legacy handling; will become unavailable 11/2.
if (!ethereum) {
return true;
}
await ethereum.enable();
return true;
} catch (e) {
return false;
}
} | |
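`isWeb3NodeAvailable` treats "provider missing" as `false` but re-throws an explicit permission denial. Only the availability half translates outside a browser; a loose sketch with web3.py (v6 method names; the RPC URL is a placeholder, and there is no MetaMask permission flow here):

```python
from web3 import Web3

def is_web3_node_available(rpc_url: str = "http://127.0.0.1:8545") -> bool:
    """Return True when an Ethereum JSON-RPC node answers at rpc_url."""
    w3 = Web3(Web3.HTTPProvider(rpc_url))
    if not w3.is_connected():
        return False
    # Roughly getNetVersion() and getAccounts() from the TypeScript above.
    print("network:", w3.net.version)
    print("accounts:", w3.eth.accounts)
    return True
```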
0001_initial.py | # Generated by Django 3.1.1 on 2020-09-28 19:21
import datetime
from django.db import migrations, models
import django.db.models.deletion
class | (migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=200)),
('state', models.CharField(max_length=200)),
('zipcode', models.CharField(max_length=100)),
('description', models.TextField(blank=True)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('garage', models.IntegerField(default=0)),
('sqft', models.IntegerField()),
('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.realtor')),
],
),
]
| Migration |
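For reference, this initial migration corresponds to a model along these lines (a reconstruction from the migration's fields, not the project's actual listings/models.py):

```python
from datetime import datetime

from django.db import models
from realtors.models import Realtor

class Listing(models.Model):
    realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
    title = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    state = models.CharField(max_length=200)
    zipcode = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    price = models.IntegerField()
    bedrooms = models.IntegerField()
    bathrooms = models.DecimalField(max_digits=2, decimal_places=1)
    garage = models.IntegerField(default=0)
    sqft = models.IntegerField()
    lot_size = models.DecimalField(max_digits=5, decimal_places=1)
    photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
    # photo_1 through photo_6 repeat this pattern with blank=True.
    photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    is_published = models.BooleanField(default=True)
    list_date = models.DateTimeField(default=datetime.now, blank=True)
```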
client.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(std::fmt::Debug)]
pub(crate) struct Handle<C = aws_hyper::DynConnector> {
client: aws_hyper::Client<C>,
conf: crate::Config,
}
#[derive(Clone, std::fmt::Debug)]
pub struct Client<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<Handle<C>>,
}
impl<C> Client<C> {
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let client = aws_hyper::Client::new(conn);
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl Client {
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_env() -> Self {
Self::from_conf(crate::Config::builder().build())
}
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let client = aws_hyper::Client::https();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
impl<C> Client<C>
where
C: aws_hyper::SmithyConnector,
{
pub fn add_layer_version_permission(&self) -> fluent_builders::AddLayerVersionPermission<C> {
fluent_builders::AddLayerVersionPermission::new(self.handle.clone())
}
pub fn add_permission(&self) -> fluent_builders::AddPermission<C> {
fluent_builders::AddPermission::new(self.handle.clone())
}
pub fn create_alias(&self) -> fluent_builders::CreateAlias<C> {
fluent_builders::CreateAlias::new(self.handle.clone())
}
pub fn create_code_signing_config(&self) -> fluent_builders::CreateCodeSigningConfig<C> {
fluent_builders::CreateCodeSigningConfig::new(self.handle.clone())
}
pub fn create_event_source_mapping(&self) -> fluent_builders::CreateEventSourceMapping<C> {
fluent_builders::CreateEventSourceMapping::new(self.handle.clone())
}
pub fn create_function(&self) -> fluent_builders::CreateFunction<C> {
fluent_builders::CreateFunction::new(self.handle.clone())
}
pub fn delete_alias(&self) -> fluent_builders::DeleteAlias<C> {
fluent_builders::DeleteAlias::new(self.handle.clone())
}
pub fn delete_code_signing_config(&self) -> fluent_builders::DeleteCodeSigningConfig<C> {
fluent_builders::DeleteCodeSigningConfig::new(self.handle.clone())
}
pub fn delete_event_source_mapping(&self) -> fluent_builders::DeleteEventSourceMapping<C> {
fluent_builders::DeleteEventSourceMapping::new(self.handle.clone())
}
pub fn delete_function(&self) -> fluent_builders::DeleteFunction<C> {
fluent_builders::DeleteFunction::new(self.handle.clone())
}
pub fn delete_function_code_signing_config(
&self,
) -> fluent_builders::DeleteFunctionCodeSigningConfig<C> {
fluent_builders::DeleteFunctionCodeSigningConfig::new(self.handle.clone())
}
pub fn delete_function_concurrency(&self) -> fluent_builders::DeleteFunctionConcurrency<C> {
fluent_builders::DeleteFunctionConcurrency::new(self.handle.clone())
}
pub fn delete_function_event_invoke_config(
&self,
) -> fluent_builders::DeleteFunctionEventInvokeConfig<C> {
fluent_builders::DeleteFunctionEventInvokeConfig::new(self.handle.clone())
}
pub fn delete_layer_version(&self) -> fluent_builders::DeleteLayerVersion<C> {
fluent_builders::DeleteLayerVersion::new(self.handle.clone())
}
pub fn delete_provisioned_concurrency_config(
&self,
) -> fluent_builders::DeleteProvisionedConcurrencyConfig<C> {
fluent_builders::DeleteProvisionedConcurrencyConfig::new(self.handle.clone())
}
pub fn get_account_settings(&self) -> fluent_builders::GetAccountSettings<C> {
fluent_builders::GetAccountSettings::new(self.handle.clone())
}
pub fn get_alias(&self) -> fluent_builders::GetAlias<C> {
fluent_builders::GetAlias::new(self.handle.clone())
}
pub fn get_code_signing_config(&self) -> fluent_builders::GetCodeSigningConfig<C> {
fluent_builders::GetCodeSigningConfig::new(self.handle.clone())
}
pub fn get_event_source_mapping(&self) -> fluent_builders::GetEventSourceMapping<C> {
fluent_builders::GetEventSourceMapping::new(self.handle.clone())
}
pub fn get_function(&self) -> fluent_builders::GetFunction<C> {
fluent_builders::GetFunction::new(self.handle.clone())
}
pub fn get_function_code_signing_config(
&self,
) -> fluent_builders::GetFunctionCodeSigningConfig<C> {
fluent_builders::GetFunctionCodeSigningConfig::new(self.handle.clone())
}
pub fn get_function_concurrency(&self) -> fluent_builders::GetFunctionConcurrency<C> {
fluent_builders::GetFunctionConcurrency::new(self.handle.clone())
}
pub fn get_function_configuration(&self) -> fluent_builders::GetFunctionConfiguration<C> {
fluent_builders::GetFunctionConfiguration::new(self.handle.clone())
}
pub fn get_function_event_invoke_config(
&self,
) -> fluent_builders::GetFunctionEventInvokeConfig<C> {
fluent_builders::GetFunctionEventInvokeConfig::new(self.handle.clone())
}
pub fn get_layer_version(&self) -> fluent_builders::GetLayerVersion<C> {
fluent_builders::GetLayerVersion::new(self.handle.clone())
}
pub fn get_layer_version_by_arn(&self) -> fluent_builders::GetLayerVersionByArn<C> {
fluent_builders::GetLayerVersionByArn::new(self.handle.clone())
}
pub fn get_layer_version_policy(&self) -> fluent_builders::GetLayerVersionPolicy<C> {
fluent_builders::GetLayerVersionPolicy::new(self.handle.clone())
}
pub fn get_policy(&self) -> fluent_builders::GetPolicy<C> {
fluent_builders::GetPolicy::new(self.handle.clone())
}
pub fn get_provisioned_concurrency_config(
&self,
) -> fluent_builders::GetProvisionedConcurrencyConfig<C> {
fluent_builders::GetProvisionedConcurrencyConfig::new(self.handle.clone())
}
pub fn invoke(&self) -> fluent_builders::Invoke<C> {
fluent_builders::Invoke::new(self.handle.clone())
}
pub fn invoke_async(&self) -> fluent_builders::InvokeAsync<C> {
fluent_builders::InvokeAsync::new(self.handle.clone())
}
pub fn list_aliases(&self) -> fluent_builders::ListAliases<C> {
fluent_builders::ListAliases::new(self.handle.clone())
}
pub fn list_code_signing_configs(&self) -> fluent_builders::ListCodeSigningConfigs<C> {
fluent_builders::ListCodeSigningConfigs::new(self.handle.clone())
}
pub fn list_event_source_mappings(&self) -> fluent_builders::ListEventSourceMappings<C> {
fluent_builders::ListEventSourceMappings::new(self.handle.clone())
}
pub fn list_function_event_invoke_configs(
&self,
) -> fluent_builders::ListFunctionEventInvokeConfigs<C> {
fluent_builders::ListFunctionEventInvokeConfigs::new(self.handle.clone())
}
pub fn list_functions(&self) -> fluent_builders::ListFunctions<C> {
fluent_builders::ListFunctions::new(self.handle.clone())
}
pub fn list_functions_by_code_signing_config(
&self,
) -> fluent_builders::ListFunctionsByCodeSigningConfig<C> {
fluent_builders::ListFunctionsByCodeSigningConfig::new(self.handle.clone())
}
pub fn list_layers(&self) -> fluent_builders::ListLayers<C> {
fluent_builders::ListLayers::new(self.handle.clone())
}
pub fn list_layer_versions(&self) -> fluent_builders::ListLayerVersions<C> {
fluent_builders::ListLayerVersions::new(self.handle.clone())
}
pub fn list_provisioned_concurrency_configs(
&self,
) -> fluent_builders::ListProvisionedConcurrencyConfigs<C> {
fluent_builders::ListProvisionedConcurrencyConfigs::new(self.handle.clone())
}
pub fn list_tags(&self) -> fluent_builders::ListTags<C> {
fluent_builders::ListTags::new(self.handle.clone())
}
pub fn list_versions_by_function(&self) -> fluent_builders::ListVersionsByFunction<C> {
fluent_builders::ListVersionsByFunction::new(self.handle.clone())
}
pub fn publish_layer_version(&self) -> fluent_builders::PublishLayerVersion<C> {
fluent_builders::PublishLayerVersion::new(self.handle.clone())
}
pub fn publish_version(&self) -> fluent_builders::PublishVersion<C> {
fluent_builders::PublishVersion::new(self.handle.clone())
}
pub fn put_function_code_signing_config(
&self,
) -> fluent_builders::PutFunctionCodeSigningConfig<C> {
fluent_builders::PutFunctionCodeSigningConfig::new(self.handle.clone())
}
pub fn put_function_concurrency(&self) -> fluent_builders::PutFunctionConcurrency<C> {
fluent_builders::PutFunctionConcurrency::new(self.handle.clone())
}
pub fn put_function_event_invoke_config(
&self,
) -> fluent_builders::PutFunctionEventInvokeConfig<C> {
fluent_builders::PutFunctionEventInvokeConfig::new(self.handle.clone())
}
pub fn put_provisioned_concurrency_config(
&self,
) -> fluent_builders::PutProvisionedConcurrencyConfig<C> {
fluent_builders::PutProvisionedConcurrencyConfig::new(self.handle.clone())
}
pub fn remove_layer_version_permission(
&self,
) -> fluent_builders::RemoveLayerVersionPermission<C> {
fluent_builders::RemoveLayerVersionPermission::new(self.handle.clone())
}
pub fn remove_permission(&self) -> fluent_builders::RemovePermission<C> {
fluent_builders::RemovePermission::new(self.handle.clone())
}
pub fn tag_resource(&self) -> fluent_builders::TagResource<C> {
fluent_builders::TagResource::new(self.handle.clone())
}
pub fn untag_resource(&self) -> fluent_builders::UntagResource<C> {
fluent_builders::UntagResource::new(self.handle.clone())
}
pub fn update_alias(&self) -> fluent_builders::UpdateAlias<C> {
fluent_builders::UpdateAlias::new(self.handle.clone())
}
pub fn update_code_signing_config(&self) -> fluent_builders::UpdateCodeSigningConfig<C> {
fluent_builders::UpdateCodeSigningConfig::new(self.handle.clone())
}
pub fn update_event_source_mapping(&self) -> fluent_builders::UpdateEventSourceMapping<C> {
fluent_builders::UpdateEventSourceMapping::new(self.handle.clone())
}
pub fn update_function_code(&self) -> fluent_builders::UpdateFunctionCode<C> {
fluent_builders::UpdateFunctionCode::new(self.handle.clone())
}
pub fn update_function_configuration(&self) -> fluent_builders::UpdateFunctionConfiguration<C> {
fluent_builders::UpdateFunctionConfiguration::new(self.handle.clone())
}
pub fn update_function_event_invoke_config(
&self,
) -> fluent_builders::UpdateFunctionEventInvokeConfig<C> {
fluent_builders::UpdateFunctionEventInvokeConfig::new(self.handle.clone())
}
}
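// Each operation method above returns a fluent builder from the module below.
// `send()` builds the input, converts it into an operation against the shared
// config, and dispatches it on the underlying HTTP client.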
pub mod fluent_builders {
#[derive(std::fmt::Debug)]
pub struct AddLayerVersionPermission<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::add_layer_version_permission_input::Builder,
}
impl<C> AddLayerVersionPermission<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::AddLayerVersionPermissionOutput,
smithy_http::result::SdkError<crate::error::AddLayerVersionPermissionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>The version number.</p>
pub fn version_number(mut self, input: i64) -> Self {
self.inner = self.inner.version_number(input);
self
}
pub fn set_version_number(mut self, input: std::option::Option<i64>) -> Self {
self.inner = self.inner.set_version_number(input);
self
}
/// <p>An identifier that distinguishes the policy from others on the same layer version.</p>
pub fn statement_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.statement_id(input);
self
}
pub fn set_statement_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_statement_id(input);
self
}
/// <p>The API action that grants access to the layer. For example, <code>lambda:GetLayerVersion</code>.</p>
pub fn action(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.action(input);
self
}
pub fn set_action(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_action(input);
self
}
/// <p>An account ID, or <code>*</code> to grant permission to all AWS accounts.</p>
pub fn principal(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.principal(input);
self
}
pub fn set_principal(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_principal(input);
self
}
/// <p>With the principal set to <code>*</code>, grant permission to all accounts in the specified
/// organization.</p>
pub fn organization_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.organization_id(input);
self
}
pub fn set_organization_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_organization_id(input);
self
}
/// <p>Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a
/// policy that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
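    // Sketch of a typical call (values are placeholders; `lambda:GetLayerVersion`
    // and the `*` principal come from the field docs above):
    //
    //   client.add_layer_version_permission()
    //       .layer_name("my-layer")
    //       .version_number(1)
    //       .statement_id("public-read")
    //       .action("lambda:GetLayerVersion")
    //       .principal("*")
    //       .send()
    //       .await?;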
#[derive(std::fmt::Debug)]
pub struct AddPermission<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::add_permission_input::Builder,
}
impl<C> AddPermission<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::AddPermissionOutput,
smithy_http::result::SdkError<crate::error::AddPermissionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>A statement identifier that differentiates the statement from others in the same policy.</p>
pub fn statement_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.statement_id(input);
self
}
pub fn set_statement_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_statement_id(input);
self
}
/// <p>The action that the principal can use on the function. For example, <code>lambda:InvokeFunction</code> or
/// <code>lambda:GetFunction</code>.</p>
pub fn action(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.action(input);
self
}
pub fn set_action(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_action(input);
self
}
/// <p>The AWS service or account that invokes the function. If you specify a service, use <code>SourceArn</code> or
/// <code>SourceAccount</code> to limit who can invoke the function through that service.</p>
pub fn principal(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.principal(input);
self
}
pub fn set_principal(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_principal(input);
self
}
/// <p>For AWS services, the ARN of the AWS resource that invokes the function. For example, an Amazon S3 bucket or
/// Amazon SNS topic.</p>
pub fn source_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.source_arn(input);
self
}
pub fn set_source_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_source_arn(input);
self
}
/// <p>For Amazon S3, the ID of the account that owns the resource. Use this together with <code>SourceArn</code> to
/// ensure that the resource is owned by the specified account. It is possible for an Amazon S3 bucket to be deleted
/// by its owner and recreated by another account.</p>
pub fn source_account(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.source_account(input);
self
}
pub fn set_source_account(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_source_account(input);
self
}
/// <p>For Alexa Smart Home functions, a token that must be supplied by the invoker.</p>
pub fn event_source_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.event_source_token(input);
self
}
pub fn set_event_source_token(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_event_source_token(input);
self
}
/// <p>Specify a version or alias to add permissions to a published version of the function.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
/// <p>Only update the policy if the revision ID matches the ID that's specified. Use this option to avoid modifying a
/// policy that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
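    // Sketch: granting Amazon S3 permission to invoke a function. The bucket
    // ARN and statement ID are placeholders.
    //
    //   client.add_permission()
    //       .function_name("my-function")
    //       .statement_id("s3-invoke")
    //       .action("lambda:InvokeFunction")
    //       .principal("s3.amazonaws.com")
    //       .source_arn("arn:aws:s3:::my-bucket")
    //       .send()
    //       .await?;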
#[derive(std::fmt::Debug)]
pub struct CreateAlias<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::create_alias_input::Builder,
}
impl<C> CreateAlias<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateAliasOutput,
smithy_http::result::SdkError<crate::error::CreateAliasError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The name of the alias.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>The function version that the alias invokes.</p>
pub fn function_version(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_version(input);
self
}
pub fn set_function_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_version(input);
self
}
/// <p>A description of the alias.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html#configuring-alias-routing">routing
/// configuration</a> of the alias.</p>
pub fn routing_config(mut self, input: crate::model::AliasRoutingConfiguration) -> Self {
self.inner = self.inner.routing_config(input);
self
}
pub fn set_routing_config(
mut self,
input: std::option::Option<crate::model::AliasRoutingConfiguration>,
) -> Self {
self.inner = self.inner.set_routing_config(input);
self
}
}
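    // Sketch: pointing a `live` alias at version 2 of a function (names and
    // version are placeholders):
    //
    //   client.create_alias()
    //       .function_name("my-function")
    //       .name("live")
    //       .function_version("2")
    //       .send()
    //       .await?;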
#[derive(std::fmt::Debug)]
pub struct CreateCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::create_code_signing_config_input::Builder,
}
impl<C> CreateCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::CreateCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Descriptive name for this code signing configuration.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>Signing profiles for this code signing configuration.</p>
pub fn allowed_publishers(mut self, input: crate::model::AllowedPublishers) -> Self {
self.inner = self.inner.allowed_publishers(input);
self
}
pub fn set_allowed_publishers(
mut self,
input: std::option::Option<crate::model::AllowedPublishers>,
) -> Self {
self.inner = self.inner.set_allowed_publishers(input);
self
}
/// <p>The code signing policies define the actions to take if the validation checks fail. </p>
pub fn code_signing_policies(mut self, input: crate::model::CodeSigningPolicies) -> Self {
self.inner = self.inner.code_signing_policies(input);
self
}
pub fn set_code_signing_policies(
mut self,
input: std::option::Option<crate::model::CodeSigningPolicies>,
) -> Self {
self.inner = self.inner.set_code_signing_policies(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct CreateEventSourceMapping<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::create_event_source_mapping_input::Builder,
}
impl<C> CreateEventSourceMapping<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateEventSourceMappingOutput,
smithy_http::result::SdkError<crate::error::CreateEventSourceMappingError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the event source.</p>
/// <ul>
/// <li>
/// <p>
/// <b>Amazon Kinesis</b> - The ARN of the data stream or a stream consumer.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon DynamoDB Streams</b> - The ARN of the stream.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Simple Queue Service</b> - The ARN of the queue.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Managed Streaming for Apache Kafka</b> - The ARN of the cluster.</p>
/// </li>
/// </ul>
pub fn event_source_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.event_source_arn(input);
self
}
pub fn set_event_source_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_event_source_arn(input);
self
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Version or Alias ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>If true, the event source mapping is active. Set to false to pause polling and invocation.</p>
pub fn enabled(mut self, input: bool) -> Self {
self.inner = self.inner.enabled(input);
self
}
pub fn set_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_enabled(input);
self
}
/// <p>The maximum number of items to retrieve in a single batch.</p>
/// <ul>
/// <li>
/// <p>
/// <b>Amazon Kinesis</b> - Default 100. Max 10,000.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon DynamoDB Streams</b> - Default 100. Max 1,000.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Simple Queue Service</b> - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Managed Streaming for Apache Kafka</b> - Default 100. Max 10,000.</p>
/// </li>
/// <li>
/// <p>
/// <b>Self-Managed Apache Kafka</b> - Default 100. Max 10,000.</p>
/// </li>
/// </ul>
pub fn batch_size(mut self, input: i32) -> Self {
self.inner = self.inner.batch_size(input);
self
}
pub fn set_batch_size(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_batch_size(input);
self
}
/// <p>(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.</p>
pub fn maximum_batching_window_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_batching_window_in_seconds(input);
self
}
pub fn set_maximum_batching_window_in_seconds(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_maximum_batching_window_in_seconds(input);
self
}
/// <p>(Streams) The number of batches to process from each shard concurrently.</p>
pub fn parallelization_factor(mut self, input: i32) -> Self {
self.inner = self.inner.parallelization_factor(input);
self
}
pub fn set_parallelization_factor(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_parallelization_factor(input);
self
}
    /// <p>The position in a stream from which to start reading. Required for Amazon Kinesis, Amazon DynamoDB, and Amazon MSK stream
    /// sources. <code>AT_TIMESTAMP</code> is only supported for Amazon Kinesis streams.</p>
pub fn starting_position(mut self, input: crate::model::EventSourcePosition) -> Self {
self.inner = self.inner.starting_position(input);
self
}
pub fn set_starting_position(
mut self,
input: std::option::Option<crate::model::EventSourcePosition>,
) -> Self {
self.inner = self.inner.set_starting_position(input);
self
}
/// <p>With <code>StartingPosition</code> set to <code>AT_TIMESTAMP</code>, the time from which to start
/// reading.</p>
pub fn starting_position_timestamp(mut self, input: smithy_types::Instant) -> Self {
self.inner = self.inner.starting_position_timestamp(input);
self
}
pub fn set_starting_position_timestamp(
mut self,
input: std::option::Option<smithy_types::Instant>,
) -> Self {
self.inner = self.inner.set_starting_position_timestamp(input);
self
}
/// <p>(Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.</p>
pub fn destination_config(mut self, input: crate::model::DestinationConfig) -> Self {
self.inner = self.inner.destination_config(input);
self
}
pub fn set_destination_config(
mut self,
input: std::option::Option<crate::model::DestinationConfig>,
) -> Self {
self.inner = self.inner.set_destination_config(input);
self
}
/// <p>(Streams) Discard records older than the specified age. The default value is infinite (-1).</p>
pub fn maximum_record_age_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_record_age_in_seconds(input);
self
}
pub fn set_maximum_record_age_in_seconds(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_maximum_record_age_in_seconds(input);
self
}
/// <p>(Streams) If the function returns an error, split the batch in two and retry.</p>
pub fn bisect_batch_on_function_error(mut self, input: bool) -> Self {
self.inner = self.inner.bisect_batch_on_function_error(input);
self
}
pub fn set_bisect_batch_on_function_error(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.inner = self.inner.set_bisect_batch_on_function_error(input);
self
}
/// <p>(Streams) Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records will be retried until the record expires.</p>
pub fn maximum_retry_attempts(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_retry_attempts(input);
self
}
pub fn set_maximum_retry_attempts(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_maximum_retry_attempts(input);
self
}
    /// <p>(Streams) The duration in seconds of a processing window. The range is between 1 second and 900 seconds.</p>
pub fn tumbling_window_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.tumbling_window_in_seconds(input);
self
}
pub fn set_tumbling_window_in_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_tumbling_window_in_seconds(input);
self
}
/// <p>The name of the Kafka topic.</p>
pub fn topics(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.topics(inp);
self
}
pub fn set_topics(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_topics(input);
self
}
/// <p>
/// (MQ) The name of the Amazon MQ broker destination queue to consume.
/// </p>
pub fn queues(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.queues(inp);
self
}
pub fn set_queues(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_queues(input);
self
}
    /// <p>An array of authentication protocols or VPC components required to secure your event source.</p>
pub fn source_access_configurations(
mut self,
inp: impl Into<crate::model::SourceAccessConfiguration>,
) -> Self {
self.inner = self.inner.source_access_configurations(inp);
self
}
pub fn set_source_access_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SourceAccessConfiguration>>,
) -> Self {
self.inner = self.inner.set_source_access_configurations(input);
self
}
    /// <p>The Self-Managed Apache Kafka cluster to receive records from.</p>
pub fn self_managed_event_source(
mut self,
input: crate::model::SelfManagedEventSource,
) -> Self {
self.inner = self.inner.self_managed_event_source(input);
self
}
pub fn set_self_managed_event_source(
mut self,
input: std::option::Option<crate::model::SelfManagedEventSource>,
) -> Self {
self.inner = self.inner.set_self_managed_event_source(input);
self
}
/// <p>(Streams) A list of current response type enums applied to the event source mapping.</p>
pub fn function_response_types(
mut self,
inp: impl Into<crate::model::FunctionResponseType>,
) -> Self {
self.inner = self.inner.function_response_types(inp);
self
}
pub fn set_function_response_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::FunctionResponseType>>,
) -> Self {
self.inner = self.inner.set_function_response_types(input);
self
}
}
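    // Sketch: subscribing a function to an SQS queue. The ARN, name, and batch
    // size are placeholders; stream sources would additionally need
    // `starting_position`.
    //
    //   client.create_event_source_mapping()
    //       .event_source_arn("arn:aws:sqs:us-west-2:123456789012:my-queue")
    //       .function_name("my-function")
    //       .batch_size(10)
    //       .send()
    //       .await?;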
#[derive(std::fmt::Debug)]
pub struct CreateFunction<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::create_function_input::Builder,
}
impl<C> CreateFunction<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::CreateFunctionOutput,
smithy_http::result::SdkError<crate::error::CreateFunctionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The identifier of the function's <a href="https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html">runtime</a>.</p>
pub fn runtime(mut self, input: crate::model::Runtime) -> Self {
self.inner = self.inner.runtime(input);
self
}
pub fn set_runtime(mut self, input: std::option::Option<crate::model::Runtime>) -> Self {
self.inner = self.inner.set_runtime(input);
self
}
/// <p>The Amazon Resource Name (ARN) of the function's execution role.</p>
pub fn role(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role(input);
self
}
pub fn set_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role(input);
self
}
/// <p>The name of the method within your code that Lambda calls to execute your function. The format includes the
/// file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information,
/// see <a href="https://docs.aws.amazon.com/lambda/latest/dg/programming-model-v2.html">Programming Model</a>.</p>
pub fn handler(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.handler(input);
self
}
pub fn set_handler(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_handler(input);
self
}
/// <p>The code for the function.</p>
pub fn code(mut self, input: crate::model::FunctionCode) -> Self {
self.inner = self.inner.code(input);
self
}
pub fn set_code(mut self, input: std::option::Option<crate::model::FunctionCode>) -> Self {
self.inner = self.inner.set_code(input);
self
}
/// <p>A description of the function.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The amount of time that Lambda allows a function to run before stopping it. The default is 3 seconds. The
/// maximum allowed value is 900 seconds.</p>
pub fn timeout(mut self, input: i32) -> Self {
self.inner = self.inner.timeout(input);
self
}
pub fn set_timeout(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_timeout(input);
self
}
/// <p>The amount of memory available to the function at runtime. Increasing the function's memory also increases its CPU
/// allocation. The default value is 128 MB. The value can be any multiple of 1 MB.</p>
pub fn memory_size(mut self, input: i32) -> Self {
self.inner = self.inner.memory_size(input);
self
}
pub fn set_memory_size(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_memory_size(input);
self
}
/// <p>Set to true to publish the first version of the function during creation.</p>
pub fn publish(mut self, input: bool) -> Self {
self.inner = self.inner.publish(input);
self
}
pub fn set_publish(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_publish(input);
self
}
/// <p>For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC.
/// When you connect a function to a VPC, it can only access resources and the internet through that VPC. For more
/// information, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html">VPC Settings</a>.</p>
pub fn vpc_config(mut self, input: crate::model::VpcConfig) -> Self {
self.inner = self.inner.vpc_config(input);
self
}
pub fn set_vpc_config(
mut self,
input: std::option::Option<crate::model::VpcConfig>,
) -> Self {
self.inner = self.inner.set_vpc_config(input);
self
}
/// <p>The type of deployment package. Set to <code>Image</code> for container image and set <code>Zip</code> for ZIP archive.</p>
pub fn package_type(mut self, input: crate::model::PackageType) -> Self {
self.inner = self.inner.package_type(input);
self
}
pub fn set_package_type(
mut self,
input: std::option::Option<crate::model::PackageType>,
) -> Self {
self.inner = self.inner.set_package_type(input);
self
}
/// <p>A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events
/// when they fail processing. For more information, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#dlq">Dead Letter Queues</a>.</p>
pub fn dead_letter_config(mut self, input: crate::model::DeadLetterConfig) -> Self {
self.inner = self.inner.dead_letter_config(input);
self
}
pub fn set_dead_letter_config(
mut self,
input: std::option::Option<crate::model::DeadLetterConfig>,
) -> Self {
self.inner = self.inner.set_dead_letter_config(input);
self
}
/// <p>Environment variables that are accessible from function code during execution.</p>
pub fn environment(mut self, input: crate::model::Environment) -> Self {
self.inner = self.inner.environment(input);
self
}
pub fn set_environment(
mut self,
input: std::option::Option<crate::model::Environment>,
) -> Self {
self.inner = self.inner.set_environment(input);
self
}
/// <p>The ARN of the AWS Key Management Service (AWS KMS) key that's used to encrypt your function's environment
/// variables. If it's not provided, AWS Lambda uses a default service key.</p>
pub fn kms_key_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.kms_key_arn(input);
self
}
pub fn set_kms_key_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_kms_key_arn(input);
self
}
/// <p>Set <code>Mode</code> to <code>Active</code> to sample and trace a subset of incoming requests with AWS
/// X-Ray.</p>
pub fn tracing_config(mut self, input: crate::model::TracingConfig) -> Self {
self.inner = self.inner.tracing_config(input);
self
}
pub fn set_tracing_config(
mut self,
input: std::option::Option<crate::model::TracingConfig>,
) -> Self {
self.inner = self.inner.set_tracing_config(input);
self
}
/// <p>A list of <a href="https://docs.aws.amazon.com/lambda/latest/dg/tagging.html">tags</a> to apply to the
/// function.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
/// <p>A list of <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html">function layers</a>
/// to add to the function's execution environment. Specify each layer by its ARN, including the version.</p>
pub fn layers(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layers(inp);
self
}
pub fn set_layers(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_layers(input);
self
}
/// <p>Connection settings for an Amazon EFS file system.</p>
pub fn file_system_configs(
mut self,
inp: impl Into<crate::model::FileSystemConfig>,
) -> Self {
self.inner = self.inner.file_system_configs(inp);
self
}
pub fn set_file_system_configs(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::FileSystemConfig>>,
) -> Self {
self.inner = self.inner.set_file_system_configs(input);
self
}
/// <p>
/// <a href="https://docs.aws.amazon.com/lambda/latest/dg/images-parms.html">Container image configuration
/// values</a> that override the values in the container image Dockerfile.</p>
pub fn image_config(mut self, input: crate::model::ImageConfig) -> Self {
self.inner = self.inner.image_config(input);
self
}
pub fn set_image_config(
mut self,
input: std::option::Option<crate::model::ImageConfig>,
) -> Self {
self.inner = self.inner.set_image_config(input);
self
}
/// <p>To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration
/// includes a set of signing profiles, which define the trusted publishers for this function.</p>
pub fn code_signing_config_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_signing_config_arn(input);
self
}
pub fn set_code_signing_config_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_code_signing_config_arn(input);
self
}
}
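    // Sketch: a minimal ZIP-based function (all values are placeholders, and
    // `code` stands in for a `crate::model::FunctionCode` carrying the
    // deployment package):
    //
    //   client.create_function()
    //       .function_name("my-function")
    //       .role("arn:aws:iam::123456789012:role/lambda-role")
    //       .handler("index.handler")
    //       .runtime(runtime) // a crate::model::Runtime variant
    //       .code(code)
    //       .send()
    //       .await?;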
#[derive(std::fmt::Debug)]
pub struct DeleteAlias<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_alias_input::Builder,
}
impl<C> DeleteAlias<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteAliasOutput,
smithy_http::result::SdkError<crate::error::DeleteAliasError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The name of the alias.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_code_signing_config_input::Builder,
}
impl<C> DeleteCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::DeleteCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
    /// <p>The Amazon Resource Name (ARN) of the code signing configuration.</p>
pub fn code_signing_config_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_signing_config_arn(input);
self
}
pub fn set_code_signing_config_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_code_signing_config_arn(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteEventSourceMapping<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_event_source_mapping_input::Builder,
}
impl<C> DeleteEventSourceMapping<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteEventSourceMappingOutput,
smithy_http::result::SdkError<crate::error::DeleteEventSourceMappingError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The identifier of the event source mapping.</p>
pub fn uuid(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.uuid(input);
self
}
pub fn set_uuid(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_uuid(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteFunction<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_function_input::Builder,
}
impl<C> DeleteFunction<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteFunctionOutput,
smithy_http::result::SdkError<crate::error::DeleteFunctionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function or version.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:1</code> (with version).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify a version to delete. You can't delete a version that's referenced by an alias.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteFunctionCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_function_code_signing_config_input::Builder,
}
impl<C> DeleteFunctionCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteFunctionCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::DeleteFunctionCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteFunctionConcurrency<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_function_concurrency_input::Builder,
}
impl<C> DeleteFunctionConcurrency<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteFunctionConcurrencyOutput,
smithy_http::result::SdkError<crate::error::DeleteFunctionConcurrencyError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteFunctionEventInvokeConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_function_event_invoke_config_input::Builder,
}
impl<C> DeleteFunctionEventInvokeConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteFunctionEventInvokeConfigOutput,
smithy_http::result::SdkError<crate::error::DeleteFunctionEventInvokeConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>A version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteLayerVersion<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_layer_version_input::Builder,
}
impl<C> DeleteLayerVersion<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteLayerVersionOutput,
smithy_http::result::SdkError<crate::error::DeleteLayerVersionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>The version number.</p>
pub fn version_number(mut self, input: i64) -> Self {
self.inner = self.inner.version_number(input);
self
}
pub fn set_version_number(mut self, input: std::option::Option<i64>) -> Self {
self.inner = self.inner.set_version_number(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DeleteProvisionedConcurrencyConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::delete_provisioned_concurrency_config_input::Builder,
}
impl<C> DeleteProvisionedConcurrencyConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteProvisionedConcurrencyConfigOutput,
smithy_http::result::SdkError<crate::error::DeleteProvisionedConcurrencyConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
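/// Fluent builder for the `GetAccountSettings` operation, which takes no request
/// parameters. A minimal usage sketch (it assumes an already-constructed client
/// for this crate with the snake_case operation methods generated elsewhere in
/// this module; the surrounding function is illustrative):
///
/// ```ignore
/// async fn show_account_settings(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
///     // No builder setters are required; just build and send the request.
///     let settings = client.get_account_settings().send().await?;
///     println!("{:?}", settings);
///     Ok(())
/// }
/// ```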
#[derive(std::fmt::Debug)]
pub struct GetAccountSettings<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_account_settings_input::Builder,
}
impl<C> GetAccountSettings<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetAccountSettingsOutput,
smithy_http::result::SdkError<crate::error::GetAccountSettingsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
}
#[derive(std::fmt::Debug)]
pub struct GetAlias<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_alias_input::Builder,
}
impl<C> GetAlias<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetAliasOutput,
smithy_http::result::SdkError<crate::error::GetAliasError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The name of the alias.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_code_signing_config_input::Builder,
}
impl<C> GetCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::GetCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
    /// <p>The Amazon Resource Name (ARN) of the code signing configuration.</p>
pub fn code_signing_config_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_signing_config_arn(input);
self
}
pub fn set_code_signing_config_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_code_signing_config_arn(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetEventSourceMapping<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_event_source_mapping_input::Builder,
}
impl<C> GetEventSourceMapping<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetEventSourceMappingOutput,
smithy_http::result::SdkError<crate::error::GetEventSourceMappingError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The identifier of the event source mapping.</p>
pub fn uuid(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.uuid(input);
self
}
pub fn set_uuid(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_uuid(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetFunction<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_function_input::Builder,
}
impl<C> GetFunction<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetFunctionOutput,
smithy_http::result::SdkError<crate::error::GetFunctionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
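    ///
    /// As a sketch, fetching a specific alias by appending it to the name-only
    /// format (the function and alias names are illustrative, and the client is
    /// assumed to be constructed elsewhere):
    ///
    /// ```ignore
    /// async fn get_prod_alias(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     // `my-function:PROD` selects the `PROD` alias of `my-function`.
    ///     let resp = client.get_function().function_name("my-function:PROD").send().await?;
    ///     println!("{:?}", resp.configuration);
    ///     Ok(())
    /// }
    /// ```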
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify a version or alias to get details about a published version of the function.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetFunctionCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_function_code_signing_config_input::Builder,
}
impl<C> GetFunctionCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetFunctionCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::GetFunctionCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetFunctionConcurrency<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_function_concurrency_input::Builder,
}
impl<C> GetFunctionConcurrency<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetFunctionConcurrencyOutput,
smithy_http::result::SdkError<crate::error::GetFunctionConcurrencyError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetFunctionConfiguration<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_function_configuration_input::Builder,
}
impl<C> GetFunctionConfiguration<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetFunctionConfigurationOutput,
smithy_http::result::SdkError<crate::error::GetFunctionConfigurationError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify a version or alias to get details about a published version of the function.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetFunctionEventInvokeConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_function_event_invoke_config_input::Builder,
}
impl<C> GetFunctionEventInvokeConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetFunctionEventInvokeConfigOutput,
smithy_http::result::SdkError<crate::error::GetFunctionEventInvokeConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>A version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetLayerVersion<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_layer_version_input::Builder,
}
impl<C> GetLayerVersion<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetLayerVersionOutput,
smithy_http::result::SdkError<crate::error::GetLayerVersionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>The version number.</p>
pub fn version_number(mut self, input: i64) -> Self {
self.inner = self.inner.version_number(input);
self
}
pub fn set_version_number(mut self, input: std::option::Option<i64>) -> Self {
self.inner = self.inner.set_version_number(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetLayerVersionByArn<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_layer_version_by_arn_input::Builder,
}
impl<C> GetLayerVersionByArn<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetLayerVersionByArnOutput,
smithy_http::result::SdkError<crate::error::GetLayerVersionByArnError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The ARN of the layer version.</p>
pub fn arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.arn(input);
self
}
pub fn set_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_arn(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetLayerVersionPolicy<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_layer_version_policy_input::Builder,
}
impl<C> GetLayerVersionPolicy<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetLayerVersionPolicyOutput,
smithy_http::result::SdkError<crate::error::GetLayerVersionPolicyError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>The version number.</p>
pub fn version_number(mut self, input: i64) -> Self {
self.inner = self.inner.version_number(input);
self
}
pub fn set_version_number(mut self, input: std::option::Option<i64>) -> Self {
self.inner = self.inner.set_version_number(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetPolicy<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_policy_input::Builder,
}
impl<C> GetPolicy<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetPolicyOutput,
smithy_http::result::SdkError<crate::error::GetPolicyError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify a version or alias to get the policy for that resource.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetProvisionedConcurrencyConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::get_provisioned_concurrency_config_input::Builder,
}
impl<C> GetProvisionedConcurrencyConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetProvisionedConcurrencyConfigOutput,
smithy_http::result::SdkError<crate::error::GetProvisionedConcurrencyConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct Invoke<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::invoke_input::Builder,
}
impl<C> Invoke<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::InvokeOutput,
smithy_http::result::SdkError<crate::error::InvokeError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Choose from the following options.</p>
/// <ul>
/// <li>
/// <p>
/// <code>RequestResponse</code> (default) - Invoke the function synchronously. Keep the connection open until
/// the function returns a response or times out. The API response includes the function response and additional
/// data.</p>
/// </li>
/// <li>
/// <p>
/// <code>Event</code> - Invoke the function asynchronously. Send events that fail multiple times to the
/// function's dead-letter queue (if it's configured). The API response only includes a status code.</p>
/// </li>
/// <li>
/// <p>
/// <code>DryRun</code> - Validate parameter values and verify that the user or role has permission to invoke
/// the function.</p>
/// </li>
/// </ul>
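    ///
    /// A sketch of a synchronous invocation (the function name and payload are
    /// illustrative; `Blob` wraps the raw request bytes):
    ///
    /// ```ignore
    /// async fn invoke_sync(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let resp = client
    ///         .invoke()
    ///         .function_name("my-function")
    ///         // `RequestResponse` keeps the connection open and returns the function's output.
    ///         .invocation_type(crate::model::InvocationType::RequestResponse)
    ///         .payload(smithy_types::Blob::new(&b"{\"key\":\"value\"}"[..]))
    ///         .send()
    ///         .await?;
    ///     println!("status: {:?}", resp.status_code);
    ///     Ok(())
    /// }
    /// ```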
pub fn invocation_type(mut self, input: crate::model::InvocationType) -> Self {
self.inner = self.inner.invocation_type(input);
self
}
pub fn set_invocation_type(
mut self,
input: std::option::Option<crate::model::InvocationType>,
) -> Self {
self.inner = self.inner.set_invocation_type(input);
self
}
/// <p>Set to <code>Tail</code> to include the execution log in the response.</p>
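    ///
    /// For example (a sketch; `log_result` on the output carries the tail of the
    /// execution log, base64-encoded, and decoding it is left out here):
    ///
    /// ```ignore
    /// async fn invoke_with_log(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let resp = client
    ///         .invoke()
    ///         .function_name("my-function")
    ///         .log_type(crate::model::LogType::Tail)
    ///         .send()
    ///         .await?;
    ///     // Base64-encoded tail of the execution log.
    ///     println!("{:?}", resp.log_result);
    ///     Ok(())
    /// }
    /// ```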
pub fn log_type(mut self, input: crate::model::LogType) -> Self {
self.inner = self.inner.log_type(input);
self
}
pub fn set_log_type(mut self, input: std::option::Option<crate::model::LogType>) -> Self {
self.inner = self.inner.set_log_type(input);
self
}
/// <p>Up to 3583 bytes of base64-encoded data about the invoking client to pass to the function in the context
/// object.</p>
pub fn client_context(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.client_context(input);
self
}
pub fn set_client_context(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_client_context(input);
self
}
/// <p>The JSON that you want to provide to your Lambda function as input.</p>
pub fn payload(mut self, input: smithy_types::Blob) -> Self {
self.inner = self.inner.payload(input);
self
}
pub fn set_payload(mut self, input: std::option::Option<smithy_types::Blob>) -> Self {
self.inner = self.inner.set_payload(input);
self
}
/// <p>Specify a version or alias to invoke a published version of the function.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct InvokeAsync<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::invoke_async_input::Builder,
}
impl<C> InvokeAsync<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::InvokeAsyncOutput,
smithy_http::result::SdkError<crate::error::InvokeAsyncError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The JSON that you want to provide to your Lambda function as input.</p>
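    ///
    /// A sketch of building the request body from in-memory bytes (this assumes
    /// `ByteStream` implements `From<Vec<u8>>`; check the constructors available
    /// in your `smithy_http` version):
    ///
    /// ```ignore
    /// async fn invoke_async(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let body = smithy_http::byte_stream::ByteStream::from(b"{\"key\":\"value\"}".to_vec());
    ///     client
    ///         .invoke_async()
    ///         .function_name("my-function")
    ///         .invoke_args(body)
    ///         .send()
    ///         .await?;
    ///     Ok(())
    /// }
    /// ```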
pub fn invoke_args(mut self, input: smithy_http::byte_stream::ByteStream) -> Self {
self.inner = self.inner.invoke_args(input);
self
}
pub fn set_invoke_args(
mut self,
input: std::option::Option<smithy_http::byte_stream::ByteStream>,
) -> Self {
self.inner = self.inner.set_invoke_args(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListAliases<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_aliases_input::Builder,
}
impl<C> ListAliases<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListAliasesOutput,
smithy_http::result::SdkError<crate::error::ListAliasesError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify a function version to only list aliases that invoke that version.</p>
pub fn function_version(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_version(input);
self
}
pub fn set_function_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_version(input);
self
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>Limit the number of aliases returned.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListCodeSigningConfigs<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_code_signing_configs_input::Builder,
}
impl<C> ListCodeSigningConfigs<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListCodeSigningConfigsOutput,
smithy_http::result::SdkError<crate::error::ListCodeSigningConfigsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>Maximum number of items to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListEventSourceMappings<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_event_source_mappings_input::Builder,
}
impl<C> ListEventSourceMappings<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListEventSourceMappingsOutput,
smithy_http::result::SdkError<crate::error::ListEventSourceMappingsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The Amazon Resource Name (ARN) of the event source.</p>
/// <ul>
/// <li>
/// <p>
/// <b>Amazon Kinesis</b> - The ARN of the data stream or a stream consumer.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon DynamoDB Streams</b> - The ARN of the stream.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Simple Queue Service</b> - The ARN of the queue.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Managed Streaming for Apache Kafka</b> - The ARN of the cluster.</p>
/// </li>
/// </ul>
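    ///
    /// For example, a sketch that lists only the mappings backed by one SQS
    /// queue (the queue ARN is illustrative):
    ///
    /// ```ignore
    /// async fn list_sqs_mappings(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let resp = client
    ///         .list_event_source_mappings()
    ///         .event_source_arn("arn:aws:sqs:us-west-2:123456789012:my-queue")
    ///         .send()
    ///         .await?;
    ///     println!("{:?}", resp.event_source_mappings);
    ///     Ok(())
    /// }
    /// ```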
pub fn event_source_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.event_source_arn(input);
self
}
pub fn set_event_source_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_event_source_arn(input);
self
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Version or Alias ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>A pagination token returned by a previous call.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>The maximum number of event source mappings to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListFunctionEventInvokeConfigs<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_function_event_invoke_configs_input::Builder,
}
impl<C> ListFunctionEventInvokeConfigs<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListFunctionEventInvokeConfigsOutput,
smithy_http::result::SdkError<crate::error::ListFunctionEventInvokeConfigsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>The maximum number of configurations to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListFunctions<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_functions_input::Builder,
}
impl<C> ListFunctions<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListFunctionsOutput,
smithy_http::result::SdkError<crate::error::ListFunctionsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>For Lambda@Edge functions, the AWS Region of the master function. For example, <code>us-east-1</code> filters
/// the list of functions to only include Lambda@Edge functions replicated from a master function in US East (N.
/// Virginia). If specified, you must set <code>FunctionVersion</code> to <code>ALL</code>.</p>
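    ///
    /// A sketch honoring that constraint (`FunctionVersion::All` is assumed to
    /// be the generated variant name for <code>ALL</code>):
    ///
    /// ```ignore
    /// async fn list_edge_replicas(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let resp = client
    ///         .list_functions()
    ///         .master_region("us-east-1")
    ///         // Required whenever `master_region` is specified.
    ///         .function_version(crate::model::FunctionVersion::All)
    ///         .send()
    ///         .await?;
    ///     println!("{} functions", resp.functions.unwrap_or_default().len());
    ///     Ok(())
    /// }
    /// ```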
pub fn master_region(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.master_region(input);
self
}
pub fn set_master_region(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_master_region(input);
self
}
/// <p>Set to <code>ALL</code> to include entries for all published versions of each function.</p>
pub fn function_version(mut self, input: crate::model::FunctionVersion) -> Self {
self.inner = self.inner.function_version(input);
self
}
pub fn set_function_version(
mut self,
input: std::option::Option<crate::model::FunctionVersion>,
) -> Self {
self.inner = self.inner.set_function_version(input);
self
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>The maximum number of functions to return in the response. Note that <code>ListFunctions</code> returns a maximum of 50 items in each response,
/// even if you set the number higher.</p>
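    ///
    /// Because of that cap, draining the full list means following the marker.
    /// A sketch (it assumes the output exposes public `functions` and
    /// `next_marker` fields, as the generated output types in this crate do):
    ///
    /// ```ignore
    /// async fn list_all_functions(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let mut marker: Option<String> = None;
    ///     loop {
    ///         let mut req = client.list_functions().max_items(50);
    ///         if let Some(m) = &marker {
    ///             req = req.marker(m);
    ///         }
    ///         let page = req.send().await?;
    ///         for f in page.functions.unwrap_or_default() {
    ///             println!("{:?}", f.function_name);
    ///         }
    ///         match page.next_marker {
    ///             Some(m) => marker = Some(m), // more pages remain
    ///             None => break,               // last page reached
    ///         }
    ///     }
    ///     Ok(())
    /// }
    /// ```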
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListFunctionsByCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_functions_by_code_signing_config_input::Builder,
}
impl<C> ListFunctionsByCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListFunctionsByCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::ListFunctionsByCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
    /// <p>The Amazon Resource Name (ARN) of the code signing configuration.</p>
pub fn code_signing_config_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_signing_config_arn(input);
self
}
pub fn set_code_signing_config_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_code_signing_config_arn(input);
self
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>Maximum number of items to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListLayers<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_layers_input::Builder,
}
impl<C> ListLayers<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListLayersOutput,
smithy_http::result::SdkError<crate::error::ListLayersError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>A runtime identifier. For example, <code>go1.x</code>.</p>
pub fn compatible_runtime(mut self, input: crate::model::Runtime) -> Self {
self.inner = self.inner.compatible_runtime(input);
self
}
pub fn set_compatible_runtime(
mut self,
input: std::option::Option<crate::model::Runtime>,
) -> Self {
self.inner = self.inner.set_compatible_runtime(input);
self
}
/// <p>A pagination token returned by a previous call.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>The maximum number of layers to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListLayerVersions<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_layer_versions_input::Builder,
}
impl<C> ListLayerVersions<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListLayerVersionsOutput,
smithy_http::result::SdkError<crate::error::ListLayerVersionsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>A runtime identifier. For example, <code>go1.x</code>.</p>
pub fn compatible_runtime(mut self, input: crate::model::Runtime) -> Self {
self.inner = self.inner.compatible_runtime(input);
self
}
pub fn set_compatible_runtime(
mut self,
input: std::option::Option<crate::model::Runtime>,
) -> Self {
self.inner = self.inner.set_compatible_runtime(input);
self
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>A pagination token returned by a previous call.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>The maximum number of versions to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListProvisionedConcurrencyConfigs<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_provisioned_concurrency_configs_input::Builder,
}
impl<C> ListProvisionedConcurrencyConfigs<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListProvisionedConcurrencyConfigsOutput,
smithy_http::result::SdkError<crate::error::ListProvisionedConcurrencyConfigsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>Specify a number to limit the number of configurations returned.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListTags<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_tags_input::Builder,
}
impl<C> ListTags<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListTagsOutput,
smithy_http::result::SdkError<crate::error::ListTagsError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The function's Amazon Resource Name (ARN).</p>
pub fn resource(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource(input);
self
}
pub fn set_resource(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListVersionsByFunction<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::list_versions_by_function_input::Builder,
}
impl<C> ListVersionsByFunction<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListVersionsByFunctionOutput,
smithy_http::result::SdkError<crate::error::ListVersionsByFunctionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Specify the pagination token that's returned by a previous request to retrieve the next page of results.</p>
pub fn marker(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.marker(input);
self
}
pub fn set_marker(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_marker(input);
self
}
/// <p>The maximum number of versions to return.</p>
pub fn max_items(mut self, input: i32) -> Self {
self.inner = self.inner.max_items(input);
self
}
pub fn set_max_items(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_items(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct PublishLayerVersion<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::publish_layer_version_input::Builder,
}
impl<C> PublishLayerVersion<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PublishLayerVersionOutput,
smithy_http::result::SdkError<crate::error::PublishLayerVersionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>The description of the version.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The function layer archive.</p>
pub fn content(mut self, input: crate::model::LayerVersionContentInput) -> Self {
self.inner = self.inner.content(input);
self
}
pub fn set_content(
mut self,
input: std::option::Option<crate::model::LayerVersionContentInput>,
) -> Self {
self.inner = self.inner.set_content(input);
self
}
/// <p>A list of compatible <a href="https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html">function
/// runtimes</a>. Used for filtering with <a>ListLayers</a> and <a>ListLayerVersions</a>.</p>
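    ///
    /// A sketch of publishing a layer from an S3 object (bucket, key, layer
    /// name, and runtime are illustrative; `LayerVersionContentInput` is built
    /// with its generated builder):
    ///
    /// ```ignore
    /// async fn publish_layer(client: crate::Client) -> Result<(), Box<dyn std::error::Error>> {
    ///     let content = crate::model::LayerVersionContentInput::builder()
    ///         .s3_bucket("my-bucket")
    ///         .s3_key("layer.zip")
    ///         .build();
    ///     let resp = client
    ///         .publish_layer_version()
    ///         .layer_name("my-layer")
    ///         .content(content)
    ///         .compatible_runtimes(crate::model::Runtime::Python38)
    ///         .send()
    ///         .await?;
    ///     println!("published version {:?}", resp.version);
    ///     Ok(())
    /// }
    /// ```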
pub fn compatible_runtimes(mut self, inp: impl Into<crate::model::Runtime>) -> Self {
self.inner = self.inner.compatible_runtimes(inp);
self
}
pub fn set_compatible_runtimes(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Runtime>>,
) -> Self {
self.inner = self.inner.set_compatible_runtimes(input);
self
}
/// <p>The layer's software license. It can be any of the following:</p>
/// <ul>
/// <li>
/// <p>An <a href="https://spdx.org/licenses/">SPDX license identifier</a>. For example,
/// <code>MIT</code>.</p>
/// </li>
/// <li>
/// <p>The URL of a license hosted on the internet. For example,
/// <code>https://opensource.org/licenses/MIT</code>.</p>
/// </li>
/// <li>
/// <p>The full text of the license.</p>
/// </li>
/// </ul>
pub fn license_info(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.license_info(input);
self
}
pub fn set_license_info(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_license_info(input);
self
}
}
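    // Illustrative usage (not generated code): a minimal sketch of driving the
    // builder above. It assumes the enclosing service `Client` exposes a
    // snake_case `publish_layer_version()` constructor, as smithy-generated
    // clients typically do; names and values are placeholders.
    //
    //     let _layer = client
    //         .publish_layer_version()
    //         .layer_name("my-layer")
    //         .description("shared dependencies")
    //         .content(content)             // crate::model::LayerVersionContentInput (zip or S3 location)
    //         .compatible_runtimes(runtime) // repeatable; one crate::model::Runtime per call
    //         .license_info("MIT")
    //         .send()
    //         .await?;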
#[derive(std::fmt::Debug)]
pub struct PublishVersion<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::publish_version_input::Builder,
}
impl<C> PublishVersion<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PublishVersionOutput,
smithy_http::result::SdkError<crate::error::PublishVersionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Only publish a version if the hash value matches the value that's specified. Use this option to avoid
/// publishing a version if the function code has changed since you last updated it. You can get the hash for the
/// version that you uploaded from the output of <a>UpdateFunctionCode</a>.</p>
pub fn code_sha256(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_sha256(input);
self
}
pub fn set_code_sha256(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_code_sha256(input);
self
}
/// <p>A description for the version to override the description in the function configuration.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>Only update the function if the revision ID matches the ID that's specified. Use this option to avoid
/// publishing a version if the function configuration has changed since you last updated it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
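    // Illustrative usage (not generated code; assumes a `publish_version()`
    // constructor on the service `Client`): `code_sha256` and `revision_id`
    // make the publish conditional, so a version is only created from the
    // exact code and configuration you last observed.
    //
    //     let _version = client
    //         .publish_version()
    //         .function_name("my-function")
    //         .code_sha256(expected_sha)        // from a prior UpdateFunctionCode response
    //         .revision_id(expected_revision)   // rejects concurrent configuration edits
    //         .send()
    //         .await?;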
#[derive(std::fmt::Debug)]
pub struct PutFunctionCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::put_function_code_signing_config_input::Builder,
}
impl<C> PutFunctionCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutFunctionCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::PutFunctionCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
        /// <p>The Amazon Resource Name (ARN) of the code signing configuration.</p>
pub fn code_signing_config_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_signing_config_arn(input);
self
}
pub fn set_code_signing_config_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_code_signing_config_arn(input);
self
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct PutFunctionConcurrency<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::put_function_concurrency_input::Builder,
}
impl<C> PutFunctionConcurrency<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutFunctionConcurrencyOutput,
smithy_http::result::SdkError<crate::error::PutFunctionConcurrencyError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The number of simultaneous executions to reserve for the function.</p>
pub fn reserved_concurrent_executions(mut self, input: i32) -> Self {
self.inner = self.inner.reserved_concurrent_executions(input);
self
}
pub fn set_reserved_concurrent_executions(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_reserved_concurrent_executions(input);
self
}
}
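    // Illustrative usage (not generated code; assumes a
    // `put_function_concurrency()` constructor on the service `Client`):
    // reserving concurrency both guarantees capacity for this function and
    // caps it, since the reservation is carved out of the account-level pool.
    //
    //     client
    //         .put_function_concurrency()
    //         .function_name("my-function")
    //         .reserved_concurrent_executions(100)
    //         .send()
    //         .await?;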
#[derive(std::fmt::Debug)]
pub struct PutFunctionEventInvokeConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::put_function_event_invoke_config_input::Builder,
}
impl<C> PutFunctionEventInvokeConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutFunctionEventInvokeConfigOutput,
smithy_http::result::SdkError<crate::error::PutFunctionEventInvokeConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>A version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
/// <p>The maximum number of times to retry when the function returns an error.</p>
pub fn maximum_retry_attempts(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_retry_attempts(input);
self
}
pub fn set_maximum_retry_attempts(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_maximum_retry_attempts(input);
self
}
/// <p>The maximum age of a request that Lambda sends to a function for processing.</p>
pub fn maximum_event_age_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_event_age_in_seconds(input);
self
}
pub fn set_maximum_event_age_in_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_maximum_event_age_in_seconds(input);
self
}
/// <p>A destination for events after they have been sent to a function for processing.</p>
/// <p class="title">
/// <b>Destinations</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function</b> - The Amazon Resource Name (ARN) of a Lambda function.</p>
/// </li>
/// <li>
/// <p>
/// <b>Queue</b> - The ARN of an SQS queue.</p>
/// </li>
/// <li>
/// <p>
/// <b>Topic</b> - The ARN of an SNS topic.</p>
/// </li>
/// <li>
/// <p>
/// <b>Event Bus</b> - The ARN of an Amazon EventBridge event bus.</p>
/// </li>
/// </ul>
pub fn destination_config(mut self, input: crate::model::DestinationConfig) -> Self {
self.inner = self.inner.destination_config(input);
self
}
pub fn set_destination_config(
mut self,
input: std::option::Option<crate::model::DestinationConfig>,
) -> Self {
self.inner = self.inner.set_destination_config(input);
self
}
}
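    // Illustrative usage (not generated code; assumes a
    // `put_function_event_invoke_config()` constructor on the service
    // `Client`): tightens async retry behavior and routes failed events to a
    // destination instead of dropping them.
    //
    //     client
    //         .put_function_event_invoke_config()
    //         .function_name("my-function")
    //         .qualifier("live")                     // apply to one alias rather than $LATEST
    //         .maximum_retry_attempts(1)
    //         .maximum_event_age_in_seconds(3600)
    //         .destination_config(on_failure_queue)  // crate::model::DestinationConfig
    //         .send()
    //         .await?;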
#[derive(std::fmt::Debug)]
pub struct PutProvisionedConcurrencyConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::put_provisioned_concurrency_config_input::Builder,
}
impl<C> PutProvisionedConcurrencyConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutProvisionedConcurrencyConfigOutput,
smithy_http::result::SdkError<crate::error::PutProvisionedConcurrencyConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
/// <p>The amount of provisioned concurrency to allocate for the version or alias.</p>
pub fn provisioned_concurrent_executions(mut self, input: i32) -> Self {
self.inner = self.inner.provisioned_concurrent_executions(input);
self
}
pub fn set_provisioned_concurrent_executions(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_provisioned_concurrent_executions(input);
self
}
}
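    // Illustrative usage (not generated code; assumes a
    // `put_provisioned_concurrency_config()` constructor on the service
    // `Client`). Contrast with PutFunctionConcurrency above: provisioned
    // concurrency pre-warms execution environments for one published version
    // or alias, so the qualifier is required here.
    //
    //     client
    //         .put_provisioned_concurrency_config()
    //         .function_name("my-function")
    //         .qualifier("live")
    //         .provisioned_concurrent_executions(50)
    //         .send()
    //         .await?;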
#[derive(std::fmt::Debug)]
pub struct RemoveLayerVersionPermission<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::remove_layer_version_permission_input::Builder,
}
impl<C> RemoveLayerVersionPermission<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::RemoveLayerVersionPermissionOutput,
smithy_http::result::SdkError<crate::error::RemoveLayerVersionPermissionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name or Amazon Resource Name (ARN) of the layer.</p>
pub fn layer_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layer_name(input);
self
}
pub fn set_layer_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_layer_name(input);
self
}
/// <p>The version number.</p>
pub fn version_number(mut self, input: i64) -> Self {
self.inner = self.inner.version_number(input);
self
}
pub fn set_version_number(mut self, input: std::option::Option<i64>) -> Self {
self.inner = self.inner.set_version_number(input);
self
}
/// <p>The identifier that was specified when the statement was added.</p>
pub fn statement_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.statement_id(input);
self
}
pub fn set_statement_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_statement_id(input);
self
}
/// <p>Only update the policy if the revision ID matches the ID specified. Use this option to avoid modifying a
/// policy that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct RemovePermission<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::remove_permission_input::Builder,
}
impl<C> RemovePermission<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::RemovePermissionOutput,
smithy_http::result::SdkError<crate::error::RemovePermissionError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>Statement ID of the permission to remove.</p>
pub fn statement_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.statement_id(input);
self
}
pub fn set_statement_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_statement_id(input);
self
}
/// <p>Specify a version or alias to remove permissions from a published version of the function.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
/// <p>Only update the policy if the revision ID matches the ID that's specified. Use this option to avoid modifying a
/// policy that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct TagResource<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::tag_resource_input::Builder,
}
impl<C> TagResource<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::TagResourceOutput,
smithy_http::result::SdkError<crate::error::TagResourceError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The function's Amazon Resource Name (ARN).</p>
pub fn resource(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource(input);
self
}
pub fn set_resource(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource(input);
self
}
        /// <p>A map of tags (key-value pairs) to apply to the function.</p>
pub fn tags(
mut self,
k: impl Into<std::string::String>,
v: impl Into<std::string::String>,
) -> Self {
self.inner = self.inner.tags(k, v);
self
}
pub fn set_tags(
mut self,
input: std::option::Option<
std::collections::HashMap<std::string::String, std::string::String>,
>,
) -> Self {
self.inner = self.inner.set_tags(input);
self
}
}
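    // Illustrative usage (not generated code; assumes a `tag_resource()`
    // constructor on the service `Client`): `tags(k, v)` is repeatable and
    // accumulates entries into the map, while `set_tags` replaces the whole
    // map at once.
    //
    //     client
    //         .tag_resource()
    //         .resource("arn:aws:lambda:us-west-2:123456789012:function:my-function")
    //         .tags("team", "payments")
    //         .tags("stage", "prod")
    //         .send()
    //         .await?;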
#[derive(std::fmt::Debug)]
pub struct UntagResource<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::untag_resource_input::Builder,
}
impl<C> UntagResource<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UntagResourceOutput,
smithy_http::result::SdkError<crate::error::UntagResourceError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The function's Amazon Resource Name (ARN).</p>
pub fn resource(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.resource(input);
self
}
pub fn set_resource(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_resource(input);
self
}
/// <p>A list of tag keys to remove from the function.</p>
pub fn tag_keys(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.tag_keys(inp);
self
}
pub fn set_tag_keys(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_tag_keys(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct UpdateAlias<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_alias_input::Builder,
}
impl<C> UpdateAlias<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateAliasOutput,
smithy_http::result::SdkError<crate::error::UpdateAliasError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The name of the alias.</p>
pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(input);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>The function version that the alias invokes.</p>
pub fn function_version(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_version(input);
self
}
pub fn set_function_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_version(input);
self
}
/// <p>A description of the alias.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html#configuring-alias-routing">routing
/// configuration</a> of the alias.</p>
pub fn routing_config(mut self, input: crate::model::AliasRoutingConfiguration) -> Self {
self.inner = self.inner.routing_config(input);
self
}
pub fn set_routing_config(
mut self,
input: std::option::Option<crate::model::AliasRoutingConfiguration>,
) -> Self {
self.inner = self.inner.set_routing_config(input);
self
}
/// <p>Only update the alias if the revision ID matches the ID that's specified. Use this option to avoid modifying
/// an alias that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
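    // Illustrative usage (not generated code; assumes an `update_alias()`
    // constructor on the service `Client`): repointing an alias is the usual
    // deployment step, and a routing configuration can send a fraction of
    // traffic to the new version as a canary before the full cut-over.
    //
    //     client
    //         .update_alias()
    //         .function_name("my-function")
    //         .name("live")
    //         .function_version("26")
    //         .routing_config(canary_weights)  // crate::model::AliasRoutingConfiguration
    //         .send()
    //         .await?;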
#[derive(std::fmt::Debug)]
pub struct UpdateCodeSigningConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_code_signing_config_input::Builder,
}
impl<C> UpdateCodeSigningConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateCodeSigningConfigOutput,
smithy_http::result::SdkError<crate::error::UpdateCodeSigningConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
        /// <p>The Amazon Resource Name (ARN) of the code signing configuration.</p>
pub fn code_signing_config_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.code_signing_config_arn(input);
self
}
pub fn set_code_signing_config_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_code_signing_config_arn(input);
self
}
/// <p>Descriptive name for this code signing configuration.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>Signing profiles for this code signing configuration.</p>
pub fn allowed_publishers(mut self, input: crate::model::AllowedPublishers) -> Self {
self.inner = self.inner.allowed_publishers(input);
self
}
pub fn set_allowed_publishers(
mut self,
input: std::option::Option<crate::model::AllowedPublishers>,
) -> Self {
self.inner = self.inner.set_allowed_publishers(input);
self
}
/// <p>The code signing policy.</p>
pub fn code_signing_policies(mut self, input: crate::model::CodeSigningPolicies) -> Self {
self.inner = self.inner.code_signing_policies(input);
self
}
pub fn set_code_signing_policies(
mut self,
input: std::option::Option<crate::model::CodeSigningPolicies>,
) -> Self {
self.inner = self.inner.set_code_signing_policies(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct UpdateEventSourceMapping<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_event_source_mapping_input::Builder,
}
impl<C> UpdateEventSourceMapping<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateEventSourceMappingOutput,
smithy_http::result::SdkError<crate::error::UpdateEventSourceMappingError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The identifier of the event source mapping.</p>
pub fn uuid(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.uuid(input);
self
}
pub fn set_uuid(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_uuid(input);
self
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Version or Alias ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:MyFunction:PROD</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:MyFunction</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it's limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>If true, the event source mapping is active. Set to false to pause polling and invocation.</p>
pub fn enabled(mut self, input: bool) -> Self {
self.inner = self.inner.enabled(input);
self
}
pub fn set_enabled(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_enabled(input);
self
}
/// <p>The maximum number of items to retrieve in a single batch.</p>
/// <ul>
/// <li>
/// <p>
/// <b>Amazon Kinesis</b> - Default 100. Max 10,000.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon DynamoDB Streams</b> - Default 100. Max 1,000.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Simple Queue Service</b> - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.</p>
/// </li>
/// <li>
/// <p>
/// <b>Amazon Managed Streaming for Apache Kafka</b> - Default 100. Max 10,000.</p>
/// </li>
/// <li>
/// <p>
/// <b>Self-Managed Apache Kafka</b> - Default 100. Max 10,000.</p>
/// </li>
/// </ul>
pub fn batch_size(mut self, input: i32) -> Self {
self.inner = self.inner.batch_size(input);
self
}
pub fn set_batch_size(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_batch_size(input);
self
}
/// <p>(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.</p>
pub fn maximum_batching_window_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_batching_window_in_seconds(input);
self
}
pub fn set_maximum_batching_window_in_seconds(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_maximum_batching_window_in_seconds(input);
self
}
/// <p>(Streams) An Amazon SQS queue or Amazon SNS topic destination for discarded records.</p>
pub fn destination_config(mut self, input: crate::model::DestinationConfig) -> Self {
self.inner = self.inner.destination_config(input);
self
}
pub fn set_destination_config(
mut self,
input: std::option::Option<crate::model::DestinationConfig>,
) -> Self {
self.inner = self.inner.set_destination_config(input);
self
}
/// <p>(Streams) Discard records older than the specified age. The default value is infinite (-1).</p>
pub fn maximum_record_age_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_record_age_in_seconds(input);
self
}
pub fn set_maximum_record_age_in_seconds(
mut self,
input: std::option::Option<i32>,
) -> Self {
self.inner = self.inner.set_maximum_record_age_in_seconds(input);
self
}
/// <p>(Streams) If the function returns an error, split the batch in two and retry.</p>
pub fn bisect_batch_on_function_error(mut self, input: bool) -> Self {
self.inner = self.inner.bisect_batch_on_function_error(input);
self
}
pub fn set_bisect_batch_on_function_error(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.inner = self.inner.set_bisect_batch_on_function_error(input);
self
}
        /// <p>(Streams) Discard records after the specified number of retries. The default value is infinite (-1); when set to infinite, failed records are retried until the record expires.</p>
pub fn maximum_retry_attempts(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_retry_attempts(input);
self
}
pub fn set_maximum_retry_attempts(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_maximum_retry_attempts(input);
self
}
/// <p>(Streams) The number of batches to process from each shard concurrently.</p>
pub fn parallelization_factor(mut self, input: i32) -> Self {
self.inner = self.inner.parallelization_factor(input);
self
}
pub fn set_parallelization_factor(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_parallelization_factor(input);
self
}
        /// <p>An array of authentication protocols or VPC components used to secure your event source.</p>
pub fn source_access_configurations(
mut self,
inp: impl Into<crate::model::SourceAccessConfiguration>,
) -> Self {
self.inner = self.inner.source_access_configurations(inp);
self
}
pub fn set_source_access_configurations(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SourceAccessConfiguration>>,
) -> Self {
self.inner = self.inner.set_source_access_configurations(input);
self
}
        /// <p>(Streams) The duration of a processing window in seconds. The range is from 1 second to 900 seconds.</p>
pub fn tumbling_window_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.tumbling_window_in_seconds(input);
self
}
pub fn set_tumbling_window_in_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_tumbling_window_in_seconds(input);
self
}
/// <p>(Streams) A list of current response type enums applied to the event source mapping.</p>
pub fn function_response_types(
mut self,
inp: impl Into<crate::model::FunctionResponseType>,
) -> Self {
self.inner = self.inner.function_response_types(inp);
self
}
pub fn set_function_response_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::FunctionResponseType>>,
) -> Self {
self.inner = self.inner.set_function_response_types(input);
self
}
}
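    // Illustrative usage (not generated code; assumes an
    // `update_event_source_mapping()` constructor on the service `Client`):
    // batch size and the batching window are the main throughput/latency
    // levers for stream sources, and both can be changed in place via the
    // mapping's UUID.
    //
    //     client
    //         .update_event_source_mapping()
    //         .uuid(mapping_uuid)
    //         .batch_size(500)                        // must respect the per-source limits above
    //         .maximum_batching_window_in_seconds(5)  // gather records longer, invoke less often
    //         .send()
    //         .await?;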
#[derive(std::fmt::Debug)]
pub struct UpdateFunctionCode<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_function_code_input::Builder,
}
impl<C> UpdateFunctionCode<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateFunctionCodeOutput,
smithy_http::result::SdkError<crate::error::UpdateFunctionCodeError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self |
/// <p>The base64-encoded contents of the deployment package. AWS SDK and AWS CLI clients handle the encoding for
/// you.</p>
pub fn zip_file(mut self, input: smithy_types::Blob) -> Self {
self.inner = self.inner.zip_file(input);
self
}
pub fn set_zip_file(mut self, input: std::option::Option<smithy_types::Blob>) -> Self {
self.inner = self.inner.set_zip_file(input);
self
}
/// <p>An Amazon S3 bucket in the same AWS Region as your function. The bucket can be in a different AWS account.</p>
pub fn s3_bucket(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_bucket(input);
self
}
pub fn set_s3_bucket(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_s3_bucket(input);
self
}
/// <p>The Amazon S3 key of the deployment package.</p>
pub fn s3_key(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_key(input);
self
}
pub fn set_s3_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_s3_key(input);
self
}
/// <p>For versioned objects, the version of the deployment package object to use.</p>
pub fn s3_object_version(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.s3_object_version(input);
self
}
pub fn set_s3_object_version(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_s3_object_version(input);
self
}
/// <p>URI of a container image in the Amazon ECR registry.</p>
pub fn image_uri(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.image_uri(input);
self
}
pub fn set_image_uri(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_image_uri(input);
self
}
/// <p>Set to true to publish a new version of the function after updating the code. This has the same effect as
/// calling <a>PublishVersion</a> separately.</p>
pub fn publish(mut self, input: bool) -> Self {
self.inner = self.inner.publish(input);
self
}
pub fn set_publish(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_publish(input);
self
}
/// <p>Set to true to validate the request parameters and access permissions without modifying the function
/// code.</p>
pub fn dry_run(mut self, input: bool) -> Self {
self.inner = self.inner.dry_run(input);
self
}
pub fn set_dry_run(mut self, input: std::option::Option<bool>) -> Self {
self.inner = self.inner.set_dry_run(input);
self
}
/// <p>Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a
/// function that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
}
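    // Illustrative usage (not generated code; assumes an
    // `update_function_code()` constructor on the service `Client`; bucket and
    // key are placeholders): deploys a package from S3 and snapshots it as a
    // new version in one call via `publish(true)`.
    //
    //     client
    //         .update_function_code()
    //         .function_name("my-function")
    //         .s3_bucket("my-artifacts")          // must be in the function's region
    //         .s3_key("builds/my-function.zip")
    //         .publish(true)
    //         .send()
    //         .await?;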
#[derive(std::fmt::Debug)]
pub struct UpdateFunctionConfiguration<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_function_configuration_input::Builder,
}
impl<C> UpdateFunctionConfiguration<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateFunctionConfigurationOutput,
smithy_http::result::SdkError<crate::error::UpdateFunctionConfigurationError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>The length constraint applies only to the full ARN. If you specify only the function name, it is limited to 64
/// characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>The Amazon Resource Name (ARN) of the function's execution role.</p>
pub fn role(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.role(input);
self
}
pub fn set_role(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_role(input);
self
}
/// <p>The name of the method within your code that Lambda calls to execute your function. The format includes the
/// file name. It can also include namespaces and other qualifiers, depending on the runtime. For more information,
/// see <a href="https://docs.aws.amazon.com/lambda/latest/dg/programming-model-v2.html">Programming Model</a>.</p>
pub fn handler(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.handler(input);
self
}
pub fn set_handler(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_handler(input);
self
}
/// <p>A description of the function.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.description(input);
self
}
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_description(input);
self
}
/// <p>The amount of time that Lambda allows a function to run before stopping it. The default is 3 seconds. The
/// maximum allowed value is 900 seconds.</p>
pub fn timeout(mut self, input: i32) -> Self {
self.inner = self.inner.timeout(input);
self
}
pub fn set_timeout(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_timeout(input);
self
}
/// <p>The amount of memory available to the function at runtime. Increasing the function's memory also increases its CPU
/// allocation. The default value is 128 MB. The value can be any multiple of 1 MB.</p>
pub fn memory_size(mut self, input: i32) -> Self {
self.inner = self.inner.memory_size(input);
self
}
pub fn set_memory_size(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_memory_size(input);
self
}
/// <p>For network connectivity to AWS resources in a VPC, specify a list of security groups and subnets in the VPC.
/// When you connect a function to a VPC, it can only access resources and the internet through that VPC. For more
/// information, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-vpc.html">VPC Settings</a>.</p>
pub fn vpc_config(mut self, input: crate::model::VpcConfig) -> Self {
self.inner = self.inner.vpc_config(input);
self
}
pub fn set_vpc_config(
mut self,
input: std::option::Option<crate::model::VpcConfig>,
) -> Self {
self.inner = self.inner.set_vpc_config(input);
self
}
/// <p>Environment variables that are accessible from function code during execution.</p>
pub fn environment(mut self, input: crate::model::Environment) -> Self {
self.inner = self.inner.environment(input);
self
}
pub fn set_environment(
mut self,
input: std::option::Option<crate::model::Environment>,
) -> Self {
self.inner = self.inner.set_environment(input);
self
}
/// <p>The identifier of the function's <a href="https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html">runtime</a>.</p>
pub fn runtime(mut self, input: crate::model::Runtime) -> Self {
self.inner = self.inner.runtime(input);
self
}
pub fn set_runtime(mut self, input: std::option::Option<crate::model::Runtime>) -> Self {
self.inner = self.inner.set_runtime(input);
self
}
/// <p>A dead letter queue configuration that specifies the queue or topic where Lambda sends asynchronous events
/// when they fail processing. For more information, see <a href="https://docs.aws.amazon.com/lambda/latest/dg/invocation-async.html#dlq">Dead Letter Queues</a>.</p>
pub fn dead_letter_config(mut self, input: crate::model::DeadLetterConfig) -> Self {
self.inner = self.inner.dead_letter_config(input);
self
}
pub fn set_dead_letter_config(
mut self,
input: std::option::Option<crate::model::DeadLetterConfig>,
) -> Self {
self.inner = self.inner.set_dead_letter_config(input);
self
}
/// <p>The ARN of the AWS Key Management Service (AWS KMS) key that's used to encrypt your function's environment
/// variables. If it's not provided, AWS Lambda uses a default service key.</p>
pub fn kms_key_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.kms_key_arn(input);
self
}
pub fn set_kms_key_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_kms_key_arn(input);
self
}
/// <p>Set <code>Mode</code> to <code>Active</code> to sample and trace a subset of incoming requests with AWS
/// X-Ray.</p>
pub fn tracing_config(mut self, input: crate::model::TracingConfig) -> Self {
self.inner = self.inner.tracing_config(input);
self
}
pub fn set_tracing_config(
mut self,
input: std::option::Option<crate::model::TracingConfig>,
) -> Self {
self.inner = self.inner.set_tracing_config(input);
self
}
/// <p>Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a
/// function that has changed since you last read it.</p>
pub fn revision_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.revision_id(input);
self
}
pub fn set_revision_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_revision_id(input);
self
}
/// <p>A list of <a href="https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html">function layers</a>
/// to add to the function's execution environment. Specify each layer by its ARN, including the version.</p>
pub fn layers(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.layers(inp);
self
}
pub fn set_layers(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_layers(input);
self
}
/// <p>Connection settings for an Amazon EFS file system.</p>
pub fn file_system_configs(
mut self,
inp: impl Into<crate::model::FileSystemConfig>,
) -> Self {
self.inner = self.inner.file_system_configs(inp);
self
}
pub fn set_file_system_configs(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::FileSystemConfig>>,
) -> Self {
self.inner = self.inner.set_file_system_configs(input);
self
}
/// <p>
/// <a href="https://docs.aws.amazon.com/lambda/latest/dg/images-parms.html">Container image configuration
/// values</a> that override the values in the container image Dockerfile.</p>
pub fn image_config(mut self, input: crate::model::ImageConfig) -> Self {
self.inner = self.inner.image_config(input);
self
}
pub fn set_image_config(
mut self,
input: std::option::Option<crate::model::ImageConfig>,
) -> Self {
self.inner = self.inner.set_image_config(input);
self
}
}
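    // Illustrative usage (not generated code; assumes an
    // `update_function_configuration()` constructor on the service `Client`):
    // memory is the main performance dial, since CPU scales with it, and
    // `revision_id` keeps the update from clobbering a concurrent change.
    //
    //     client
    //         .update_function_configuration()
    //         .function_name("my-function")
    //         .memory_size(512)               // MB
    //         .timeout(30)                    // seconds, up to 900
    //         .revision_id(current_revision)
    //         .send()
    //         .await?;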
#[derive(std::fmt::Debug)]
pub struct UpdateFunctionEventInvokeConfig<C = aws_hyper::DynConnector> {
handle: std::sync::Arc<super::Handle<C>>,
inner: crate::input::update_function_event_invoke_config_input::Builder,
}
impl<C> UpdateFunctionEventInvokeConfig<C> {
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::UpdateFunctionEventInvokeConfigOutput,
smithy_http::result::SdkError<crate::error::UpdateFunctionEventInvokeConfigError>,
>
where
C: aws_hyper::SmithyConnector,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the Lambda function, version, or alias.</p>
/// <p class="title">
/// <b>Name formats</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function name</b> - <code>my-function</code> (name-only), <code>my-function:v1</code> (with alias).</p>
/// </li>
/// <li>
/// <p>
/// <b>Function ARN</b> - <code>arn:aws:lambda:us-west-2:123456789012:function:my-function</code>.</p>
/// </li>
/// <li>
/// <p>
/// <b>Partial ARN</b> - <code>123456789012:function:my-function</code>.</p>
/// </li>
/// </ul>
/// <p>You can append a version number or alias to any of the formats. The length constraint applies only to the full ARN.
/// If you specify only the function name, it is limited to 64 characters in length.</p>
pub fn function_name(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.function_name(input);
self
}
pub fn set_function_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_function_name(input);
self
}
/// <p>A version number or alias name.</p>
pub fn qualifier(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.qualifier(input);
self
}
pub fn set_qualifier(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_qualifier(input);
self
}
/// <p>The maximum number of times to retry when the function returns an error.</p>
pub fn maximum_retry_attempts(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_retry_attempts(input);
self
}
pub fn set_maximum_retry_attempts(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_maximum_retry_attempts(input);
self
}
/// <p>The maximum age of a request that Lambda sends to a function for processing.</p>
pub fn maximum_event_age_in_seconds(mut self, input: i32) -> Self {
self.inner = self.inner.maximum_event_age_in_seconds(input);
self
}
pub fn set_maximum_event_age_in_seconds(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_maximum_event_age_in_seconds(input);
self
}
/// <p>A destination for events after they have been sent to a function for processing.</p>
/// <p class="title">
/// <b>Destinations</b>
/// </p>
/// <ul>
/// <li>
/// <p>
/// <b>Function</b> - The Amazon Resource Name (ARN) of a Lambda function.</p>
/// </li>
/// <li>
/// <p>
/// <b>Queue</b> - The ARN of an SQS queue.</p>
/// </li>
/// <li>
/// <p>
/// <b>Topic</b> - The ARN of an SNS topic.</p>
/// </li>
/// <li>
/// <p>
/// <b>Event Bus</b> - The ARN of an Amazon EventBridge event bus.</p>
/// </li>
/// </ul>
pub fn destination_config(mut self, input: crate::model::DestinationConfig) -> Self {
self.inner = self.inner.destination_config(input);
self
}
pub fn set_destination_config(
mut self,
input: std::option::Option<crate::model::DestinationConfig>,
) -> Self {
self.inner = self.inner.set_destination_config(input);
self
}
}
}
| {
self.inner = self.inner.set_function_name(input);
self
} |
tablets-controller.js | module.exports = { | getTablets: function (req, res, next) {
res.render('tablets/tablets');
}
} | |
error.rs | //! Enumerated errors for this API.
use std::error::Error as StdError;
use std::io::Error as IoError;
use hyper::error::Error as HttpError;
use serde_json::error::Error as CodecError;
/// Specialized result.
pub type Result<T> = ::std::result::Result<T, Error>;
/// Composite error type for the library.
#[derive(Debug)]
pub enum Error {
Io(IoError),
Codec(CodecError),
Http(HttpError),
Api { message: String, code: ApiErrorCode },
}
impl StdError for Error {
fn description(&self) -> &str {
match *self {
Error::Io(ref e) => e.description(),
Error::Codec(ref e) => e.description(),
Error::Http(ref e) => e.description(),
Error::Api { ref message, .. } => message.as_str(),
}
}
fn cause(&self) -> Option<&StdError> {
match *self {
Error::Io(ref e) => Some(e),
Error::Codec(ref e) => Some(e),
Error::Http(ref e) => Some(e),
_ => None,
}
}
}
impl ::std::fmt::Display for Error {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
write!(f, "{:?}", self)
}
}
impl From<IoError> for Error {
fn from(error: IoError) -> Error {
Error::Io(error)
}
}
impl From<CodecError> for Error {
fn from(error: CodecError) -> Error {
Error::Codec(error)
}
}
impl From<HttpError> for Error {
fn from(error: HttpError) -> Error {
Error::Http(error)
}
}
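// Illustrative sketch (not part of the original API surface): the `From`
// impls above are what let `?` lift lower-level failures into `Error` inside
// functions that return this module's `Result` alias.
#[allow(dead_code)]
fn slurp(path: &str) -> Result<String> {
    use std::fs::File;
    use std::io::Read;
    let mut buf = String::new();
    // Each `?` converts a std::io::Error into Error::Io via From<IoError>.
    File::open(path)?.read_to_string(&mut buf)?;
    Ok(buf)
}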
/// Pandora error codes.
#[derive(Debug)]
pub enum ApiErrorCode {
Unknown,
InternalError,
MaintenanceMode,
UrlParamMissingMethod,
UrlParamMissingAuthToken,
UrlParamMissingPartnerId,
UrlParamMissingUserId,
SecureProtocolRequired,
    CertificateRequired,
ParameterTypeMismatch,
ParameterMissing,
ParameterValueInvalid,
ApiVersionNotSupported,
LicensingRestrictions,
InsufficientConnectivity,
UnknownMethodName,
WrongProtocol,
ReadOnlyMode,
InvalidAuthToken,
InvalidPartnerOrUserLogin,
ListenerNotAuthorized,
UserNotAuthorized,
MaxStationsReached,
StationDoesNotExists,
ComplimentaryPeriodAlreadyInUse,
CallNotAllowed,
DeviceNotFound,
    PartnerNotAuthorized,
InvalidUsername,
InvalidPassword,
UsernameAlreadyExists,
DeviceAlreadyAssociatedToAccount,
UpgradeDeviceModelInvalid,
ExplicitPinIncorrect,
ExplicitPinMalformed,
DeviceModelInvalid,
ZipCodeInvalid,
BirthYearInvalid,
BirthYearTooYoung,
InvalidCountryCode,
InvalidGender,
DeviceDisabled,
DailyTrialLimitReached,
InvalidSponsor,
    UserAlreadyUsedTrial,
PlaylistExceeded,
}
impl From<u32> for ApiErrorCode {
fn | (code: u32) -> Self {
match code {
0 => ApiErrorCode::InternalError,
1 => ApiErrorCode::MaintenanceMode,
2 => ApiErrorCode::UrlParamMissingMethod,
3 => ApiErrorCode::UrlParamMissingAuthToken,
4 => ApiErrorCode::UrlParamMissingPartnerId,
5 => ApiErrorCode::UrlParamMissingUserId,
6 => ApiErrorCode::SecureProtocolRequired,
            7 => ApiErrorCode::CertificateRequired,
8 => ApiErrorCode::ParameterTypeMismatch,
9 => ApiErrorCode::ParameterMissing,
10 => ApiErrorCode::ParameterValueInvalid,
11 => ApiErrorCode::ApiVersionNotSupported,
12 => ApiErrorCode::LicensingRestrictions,
13 => ApiErrorCode::InsufficientConnectivity,
14 => ApiErrorCode::UnknownMethodName,
15 => ApiErrorCode::WrongProtocol,
1000 => ApiErrorCode::ReadOnlyMode,
1001 => ApiErrorCode::InvalidAuthToken,
1002 => ApiErrorCode::InvalidPartnerOrUserLogin,
1003 => ApiErrorCode::ListenerNotAuthorized,
1004 => ApiErrorCode::UserNotAuthorized,
1005 => ApiErrorCode::MaxStationsReached,
1006 => ApiErrorCode::StationDoesNotExists,
1007 => ApiErrorCode::ComplimentaryPeriodAlreadyInUse,
1008 => ApiErrorCode::CallNotAllowed,
1009 => ApiErrorCode::DeviceNotFound,
            1010 => ApiErrorCode::PartnerNotAuthorized,
1011 => ApiErrorCode::InvalidUsername,
1012 => ApiErrorCode::InvalidPassword,
1013 => ApiErrorCode::UsernameAlreadyExists,
1014 => ApiErrorCode::DeviceAlreadyAssociatedToAccount,
1015 => ApiErrorCode::UpgradeDeviceModelInvalid,
1018 => ApiErrorCode::ExplicitPinIncorrect,
1020 => ApiErrorCode::ExplicitPinMalformed,
1023 => ApiErrorCode::DeviceModelInvalid,
1024 => ApiErrorCode::ZipCodeInvalid,
1025 => ApiErrorCode::BirthYearInvalid,
1026 => ApiErrorCode::BirthYearTooYoung,
1027 => ApiErrorCode::InvalidCountryCode,
            // TODO: Possibly not 1028; the reference listed this code as 1027 (repeated).
1028 => ApiErrorCode::InvalidGender,
1034 => ApiErrorCode::DeviceDisabled,
1035 => ApiErrorCode::DailyTrialLimitReached,
1036 => ApiErrorCode::InvalidSponsor,
            1037 => ApiErrorCode::UserAlreadyUsedTrial,
1039 => ApiErrorCode::PlaylistExceeded,
_ => ApiErrorCode::Unknown,
}
}
}
| from |
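The From<u32> impl above is a plain lookup table from wire codes to enum variants with a catch-all Unknown arm. A minimal Python sketch of the same pattern, using IntEnum with _missing_ as the catch-all; only a handful of the codes above are included, and the names are shortened for brevity:

from enum import IntEnum

class ApiErrorCode(IntEnum):
    # Subset of the codes in the record above; UNKNOWN stands in for any
    # unmapped value, mirroring the catch-all arm of the match.
    UNKNOWN = -1
    INTERNAL_ERROR = 0
    MAINTENANCE_MODE = 1
    INVALID_AUTH_TOKEN = 1001
    INVALID_PARTNER_OR_USER_LOGIN = 1002

    @classmethod
    def _missing_(cls, value):
        # Called by IntEnum when the value has no member; return the sentinel.
        return cls.UNKNOWN

assert ApiErrorCode(1001) is ApiErrorCode.INVALID_AUTH_TOKEN
assert ApiErrorCode(9999) is ApiErrorCode.UNKNOWN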
qu.py | import queue
import threading
import time
import concurrent.futures
q = queue.Queue(maxsize=10)
def | (name):
count = 1
while True:
q.put("包子 %s" % count)
print("做了包子", count)
count += 1
time.sleep(0.5)
def Consumer(name):
while True:
print("[%s] 取到[%s] 并且吃了它..." % (name, q.get()))
time.sleep(1)
p = threading.Thread(target=Producer, args=("Lily",))
c = threading.Thread(target=Consumer, args=("Lilei",))
c1 = threading.Thread(target=Consumer, args=("Ahi",))
p.start()
c.start()
c1.start()
| Producer |
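The producer and consumers above loop forever, so the script never exits on its own. A minimal sketch of the same queue pattern with a clean shutdown, assuming a fixed number of items and one None sentinel per consumer (names here are illustrative, not from the record):

import queue
import threading

q = queue.Queue(maxsize=10)
NUM_CONSUMERS = 2

def producer(n):
    for i in range(1, n + 1):
        q.put("bun %s" % i)
    for _ in range(NUM_CONSUMERS):
        q.put(None)  # sentinel: tells one consumer to stop

def consumer(name):
    while True:
        item = q.get()
        try:
            if item is None:
                return
            print("[%s] got [%s] and ate it" % (name, item))
        finally:
            q.task_done()  # every get() is paired with task_done()

threads = [threading.Thread(target=producer, args=(5,))]
threads += [threading.Thread(target=consumer, args=("c%d" % i,))
            for i in range(NUM_CONSUMERS)]
for t in threads:
    t.start()
for t in threads:
    t.join()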
utils.py | # Copyright (c) 2017-2018 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import shutil
import zipfile
import tempfile
from shutil import copy
import requests
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.exceptions import OperationRetry
from cloudify.utils import exception_to_error_cause
from cloudify_common_sdk._compat import urlparse, text_type
def generate_traceback_exception():
_, exc_value, exc_traceback = sys.exc_info()
response = exception_to_error_cause(exc_value, exc_traceback)
return response
def get_desired_value(key,
args,
instance_attr,
node_prop):
return (args.get(key) or
instance_attr.get(key) or
node_prop.get(key))
def update_attributes(_type, _key, _value):
ctx.instance.runtime_properties[_type][_key] = _value
def proxy_operation(operation):
|
def download_file(url, destination=None, keep_name=False):
"""Download file.
:param url: Location of the file to download
:type url: str
    :param destination:
        Location where the file should be saved (autogenerated by default)
    :type destination: str | None
    :param keep_name: use the filename from the url as the destination filename
    :type keep_name: bool
:returns: Location where the file was saved
:rtype: str
"""
CHUNK_SIZE = 1024
if not destination:
if keep_name:
path = urlparse(url).path
name = os.path.basename(path)
destination = os.path.join(tempfile.mkdtemp(), name)
else:
fd, destination = tempfile.mkstemp()
os.close(fd)
ctx.logger.info('Downloading {0} to {1}...'.format(url, destination))
try:
response = requests.get(url, stream=True)
except requests.exceptions.RequestException as ex:
raise NonRecoverableError(
'Failed to download {0}. ({1})'.format(url, text_type(ex)))
final_url = response.url
if final_url != url:
ctx.logger.debug('Redirected to {0}'.format(final_url))
try:
with open(destination, 'wb') as destination_file:
for chunk in response.iter_content(CHUNK_SIZE):
destination_file.write(chunk)
except IOError as ex:
raise NonRecoverableError(
'Failed to download {0}. ({1})'.format(url, text_type(ex)))
return destination
def get_local_path(source, destination=None, create_temp=False):
allowed_schemes = ['http', 'https']
if urlparse(source).scheme in allowed_schemes:
downloaded_file = download_file(source, destination, keep_name=True)
return downloaded_file
elif os.path.isfile(source):
if not destination and create_temp:
source_name = os.path.basename(source)
destination = os.path.join(tempfile.mkdtemp(), source_name)
if destination:
shutil.copy(source, destination)
return destination
else:
return source
else:
raise NonRecoverableError(
'You must provide either a path to a local file, or a remote URL '
'using one of the allowed schemes: {0}'.format(allowed_schemes))
def zip_folder(source, destination, include_folder=True):
ctx.logger.debug('Creating zip archive: {0}...'.format(destination))
with zipfile.ZipFile(destination, 'w') as zip_file:
for root, _, files in os.walk(source):
for filename in files:
file_path = os.path.join(root, filename)
source_dir = os.path.dirname(source) \
if include_folder else source
zip_file.write(
file_path, os.path.relpath(file_path, source_dir))
return destination
def zip_files(files):
source_folder = tempfile.mkdtemp()
destination_zip = source_folder + '.zip'
for path in files:
copy(path, source_folder)
zip_folder(source_folder, destination_zip, include_folder=False)
shutil.rmtree(source_folder)
return destination_zip
| def decorator(task, **kwargs):
def wrapper(**kwargs):
try:
kwargs['operation'] = operation
return task(**kwargs)
except OperationRetry:
response = generate_traceback_exception()
ctx.logger.error(
'Error traceback {0} with message {1}'.format(
response['traceback'], response['message']))
raise OperationRetry(
'Error: {0} while trying to run proxy task {1}'.format(
response['message'], operation))
except Exception:
response = generate_traceback_exception()
ctx.logger.error(
'Error traceback {0} with message {1}'.format(
response['traceback'], response['message']))
raise NonRecoverableError(
'Error: {0} while trying to run proxy task {1}'.format(
response['message'], operation))
return wrapper
return decorator |
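A distilled sketch of the proxy_operation pattern above, outside of Cloudify: a decorator factory that injects the operation name into kwargs and translates exceptions into retryable versus fatal categories. Retryable and Fatal are stand-in exception classes for this sketch, not Cloudify's OperationRetry and NonRecoverableError:

import functools

class Retryable(Exception):
    pass

class Fatal(Exception):
    pass

def proxy_operation(operation):
    def decorator(task):
        @functools.wraps(task)
        def wrapper(**kwargs):
            kwargs['operation'] = operation
            try:
                return task(**kwargs)
            except Retryable:
                raise  # let the caller reschedule the task
            except Exception as e:
                raise Fatal('Error: {0} while running proxy task {1}'
                            .format(e, operation))
        return wrapper
    return decorator

@proxy_operation('start')
def start_task(**kwargs):
    return 'ran {0}'.format(kwargs['operation'])

print(start_task())  # -> ran start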
positions.controller.ts | import { Controller, Post, Body, Get } from '@nestjs/common';
import { PositionsService } from './positions.service';
import { Position } from './positions.entity';
@Controller('positions')
export class PositionsController { | return this.positionsService.findAll();
}
@Post()
create(@Body() body: Position) {
return this.positionsService.create(body);
}
} | constructor(private readonly positionsService: PositionsService) {}
@Get()
findAll() { |
user.rs | use keyring;
use rpassword;
use preferences::{AppInfo, PreferencesMap, Preferences, PreferencesError};
use std::io;
const APP_INFO: AppInfo = AppInfo{name: "traffic", author: "Josh Mcguigan"};
const PREFS_KEY: &str = "user_prefs";
const PREFS_KEY_USERNAME : &str = "github_username";
const SERVICE : &str = "traffic";
pub struct Credential {
pub username: String,
pub password: String,
}
pub fn credential() -> Credential {
let username = username();
let password;
{
let keyring = keyring::Keyring::new(&SERVICE, &username);
password = keyring.get_password().expect("Could not find password in keychain");
}
Credential { username, password }
}
pub fn | (){
let mut preferences = load_preferences().expect("Error loading stored settings");
let username = preferences.get(PREFS_KEY_USERNAME).expect("Error loading username from stored settings").to_owned();
let keyring = keyring::Keyring::new(&SERVICE, &username);
let _ = keyring.delete_password();
preferences.remove(PREFS_KEY_USERNAME);
preferences.save(&APP_INFO, PREFS_KEY).expect("Failed to logout");
}
fn username() -> String {
let mut preferences = load_preferences().unwrap_or(PreferencesMap::new());
preferences.get(PREFS_KEY_USERNAME)
.map(|x| x.to_owned())
.unwrap_or_else(||{
println!("Enter your Github username:");
let mut buffer = String::new();
let _ = io::stdin().read_line(&mut buffer);
let username = buffer.trim().to_owned();
preferences.insert(PREFS_KEY_USERNAME.to_owned(), username.clone());
preferences.save(&APP_INFO, PREFS_KEY).expect("Failed to save username");
let password = rpassword::prompt_password_stdout("Enter your Github password (personal access token if 2FA is enabled):").unwrap();
{
let keyring = keyring::Keyring::new(&SERVICE, &username);
keyring.set_password(&password).expect("Failed to save password to keyring");
}
username
})
}
fn load_preferences() -> Result<PreferencesMap<String>, PreferencesError> {
PreferencesMap::<String>::load(&APP_INFO, PREFS_KEY)
}
| logout |
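The keychain flow above (prompt and store on first use, read thereafter, delete on logout) maps almost one-to-one onto Python's keyring package. A sketch assuming `pip install keyring`; the service name comes from the record, the prompt text is a placeholder:

import getpass
import keyring

SERVICE = 'traffic'

def login(username):
    # Prompt without echoing, then store in the OS keychain.
    password = getpass.getpass('Enter your Github password: ')
    keyring.set_password(SERVICE, username, password)

def credential(username):
    password = keyring.get_password(SERVICE, username)  # None if absent
    if password is None:
        raise RuntimeError('Could not find password in keychain')
    return username, password

def logout(username):
    keyring.delete_password(SERVICE, username)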
storage_test.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2017 Red Hat, Inc.
*
*/
package tests_test
import (
"fmt"
"path/filepath"
"strconv"
"strings"
"time"
hostdisk "kubevirt.io/kubevirt/pkg/host-disk"
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/pborman/uuid"
k8sv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
"kubevirt.io/client-go/log"
virtconfig "kubevirt.io/kubevirt/pkg/virt-config"
"kubevirt.io/kubevirt/tests"
)
const (
diskSerial = "FB-fb_18030C10002032"
)
type VMICreationFunc func(string) *v1.VirtualMachineInstance
var _ = Describe("Storage", func() {
var err error
var virtClient kubecli.KubevirtClient
BeforeEach(func() {
virtClient, err = kubecli.GetKubevirtClient()
tests.PanicOnError(err)
tests.BeforeTestCleanup()
})
Describe("Starting a VirtualMachineInstance", func() {
var _pvName string
var vmi *v1.VirtualMachineInstance
var nfsInitialized bool
initNFS := func() string {
if !nfsInitialized {
_pvName = "test-nfs" + rand.String(48)
// Prepare a NFS backed PV
By("Starting an NFS POD")
os := string(tests.ContainerDiskAlpine)
nfsIP := tests.CreateNFSTargetPOD(os)
// create a new PV and PVC (PVs can't be reused)
By("create a new NFS PV and PVC")
tests.CreateNFSPvAndPvc(_pvName, "5Gi", nfsIP, os)
nfsInitialized = true
}
return _pvName
}
BeforeEach(func() {
nfsInitialized = false
})
AfterEach(func() {
if nfsInitialized {
By("Deleting the VMI")
Expect(virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})).To(Succeed())
By("Waiting for VMI to disappear")
tests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
// PVs can't be reused
tests.DeletePvAndPvc(_pvName)
By("Deleting NFS pod")
Expect(virtClient.CoreV1().Pods(tests.NamespaceTestDefault).Delete(tests.NFSTargetName, &metav1.DeleteOptions{})).To(Succeed())
By("Waiting for NFS pod to disappear")
tests.WaitForPodToDisappearWithTimeout(tests.NFSTargetName, 120)
}
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]with Alpine PVC", func() {
table.DescribeTable("should be successfully started", func(newVMI VMICreationFunc, storageEngine string) {
tests.SkipPVCTestIfRunnigOnKindInfra()
var ignoreWarnings bool
var pvName string
// Start the VirtualMachineInstance with the PVC attached
if storageEngine == "nfs" {
pvName = initNFS()
ignoreWarnings = true
} else {
pvName = tests.DiskAlpineHostPath
}
vmi = newVMI(pvName)
tests.RunVMIAndExpectLaunchWithIgnoreWarningArg(vmi, 120, ignoreWarnings)
By("Checking that the VirtualMachineInstance console has expected output")
expecter, err := tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
expecter.Close()
},
table.Entry("[test_id:3130]with Disk PVC", tests.NewRandomVMIWithPVC, ""),
table.Entry("[test_id:3131]with CDRom PVC", tests.NewRandomVMIWithCDRom, ""),
table.Entry("with NFS Disk PVC", tests.NewRandomVMIWithPVC, "nfs"),
)
table.DescribeTable("should be successfully started and stopped multiple times", func(newVMI VMICreationFunc) {
tests.SkipPVCTestIfRunnigOnKindInfra()
vmi = newVMI(tests.DiskAlpineHostPath)
num := 3
By("Starting and stopping the VirtualMachineInstance number of times")
for i := 1; i <= num; i++ {
vmi := tests.RunVMIAndExpectLaunch(vmi, 90)
// Verify console on last iteration to verify the VirtualMachineInstance is still booting properly
// after being restarted multiple times
if i == num {
By("Checking that the VirtualMachineInstance console has expected output")
expecter, err := tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
expecter.Close()
}
err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
tests.WaitForVirtualMachineToDisappearWithTimeout(vmi, 120)
}
},
table.Entry("[test_id:3132]with Disk PVC", tests.NewRandomVMIWithPVC),
table.Entry("[test_id:3133]with CDRom PVC", tests.NewRandomVMIWithCDRom),
)
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]With an emptyDisk defined", func() {
        // The following case is mostly similar to the alpine PVC test above, except that it uses a different VirtualMachineInstance.
It("[test_id:3134]should create a writeable emptyDisk with the right capacity", func() {
// Start the VirtualMachineInstance with the empty disk attached
vmi = tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskCirros), "echo hi!")
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: "emptydisk1",
DiskDevice: v1.DiskDevice{
Disk: &v1.DiskTarget{
Bus: "virtio",
},
},
})
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: "emptydisk1",
VolumeSource: v1.VolumeSource{
EmptyDisk: &v1.EmptyDiskSource{
Capacity: resource.MustParse("2Gi"),
},
},
})
tests.RunVMIAndExpectLaunch(vmi, 90)
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
By("Checking that /dev/vdc has a capacity of 2Gi")
res, err := expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo blockdev --getsize64 /dev/vdc\n"},
&expect.BExp{R: "2147483648"}, // 2Gi in bytes
}, 10*time.Second)
log.DefaultLogger().Object(vmi).Infof("%v", res)
Expect(err).ToNot(HaveOccurred())
By("Checking if we can write to /dev/vdc")
res, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo mkfs.ext4 /dev/vdc\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
}, 20*time.Second)
log.DefaultLogger().Object(vmi).Infof("%v", res)
Expect(err).ToNot(HaveOccurred())
})
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]With an emptyDisk defined and a specified serial number", func() {
        // The following case is mostly similar to the alpine PVC test above, except that it uses a different VirtualMachineInstance.
It("[test_id:3135]should create a writeable emptyDisk with the specified serial number", func() {
// Start the VirtualMachineInstance with the empty disk attached
vmi = tests.NewRandomVMIWithEphemeralDiskAndUserdata(tests.ContainerDiskFor(tests.ContainerDiskCirros), "echo hi!")
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{ | Disk: &v1.DiskTarget{
Bus: "virtio",
},
},
})
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: "emptydisk1",
VolumeSource: v1.VolumeSource{
EmptyDisk: &v1.EmptyDiskSource{
Capacity: resource.MustParse("1Gi"),
},
},
})
tests.RunVMIAndExpectLaunch(vmi, 90)
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
By("Checking for the specified serial number")
res, err := expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo find /sys -type f -regex \".*/block/.*/serial\" | xargs cat\n"},
&expect.BExp{R: diskSerial},
}, 10*time.Second)
log.DefaultLogger().Object(vmi).Infof("%v", res)
Expect(err).ToNot(HaveOccurred())
})
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]With ephemeral alpine PVC", func() {
var isRunOnKindInfra bool
tests.BeforeAll(func() {
isRunOnKindInfra = tests.IsRunningOnKindInfra()
})
        // The following case is mostly similar to the alpine PVC test above, except that it uses a different VirtualMachineInstance.
table.DescribeTable("should be successfully started", func(newVMI VMICreationFunc, storageEngine string) {
tests.SkipPVCTestIfRunnigOnKindInfra()
var ignoreWarnings bool
var pvName string
// Start the VirtualMachineInstance with the PVC attached
if storageEngine == "nfs" {
pvName = initNFS()
ignoreWarnings = true
} else {
pvName = tests.DiskAlpineHostPath
}
vmi = newVMI(pvName)
tests.RunVMIAndExpectLaunchWithIgnoreWarningArg(vmi, 120, ignoreWarnings)
By("Checking that the VirtualMachineInstance console has expected output")
expecter, err := tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
expecter.Close()
},
table.Entry("[test_id:3136]with Ephemeral PVC", tests.NewRandomVMIWithEphemeralPVC, ""),
table.Entry("with Ephemeral PVC from NFS", tests.NewRandomVMIWithEphemeralPVC, "nfs"),
)
// Not a candidate for testing on NFS because the VMI is restarted and NFS PVC can't be re-used
It("[test_id:3137]should not persist data", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()
vmi = tests.NewRandomVMIWithEphemeralPVC(tests.DiskAlpineHostPath)
By("Starting the VirtualMachineInstance")
var createdVMI *v1.VirtualMachineInstance
if isRunOnKindInfra {
createdVMI = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 90)
} else {
createdVMI = tests.RunVMIAndExpectLaunch(vmi, 90)
}
By("Writing an arbitrary file to it's EFI partition")
expecter, err := tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
_, err = expecter.ExpectBatch([]expect.Batcher{
// Because "/" is mounted on tmpfs, we need something that normally persists writes - /dev/sda2 is the EFI partition formatted as vFAT.
&expect.BSnd{S: "mount /dev/sda2 /mnt\n"},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
&expect.BSnd{S: "echo content > /mnt/checkpoint\n"},
// The QEMU process will be killed, therefore the write must be flushed to the disk.
&expect.BSnd{S: "sync\n"},
}, 200*time.Second)
Expect(err).ToNot(HaveOccurred())
By("Killing a VirtualMachineInstance")
err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
tests.WaitForVirtualMachineToDisappearWithTimeout(createdVMI, 120)
By("Starting the VirtualMachineInstance again")
if isRunOnKindInfra {
createdVMI = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 90)
} else {
createdVMI = tests.RunVMIAndExpectLaunch(vmi, 90)
}
By("Making sure that the previously written file is not present")
expecter, err = tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
_, err = expecter.ExpectBatch([]expect.Batcher{
// Same story as when first starting the VirtualMachineInstance - the checkpoint, if persisted, is located at /dev/sda2.
&expect.BSnd{S: "mount /dev/sda2 /mnt\n"},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "0"},
&expect.BSnd{S: "cat /mnt/checkpoint &> /dev/null\n"},
&expect.BSnd{S: "echo $?\n"},
&expect.BExp{R: "1"},
}, 200*time.Second)
Expect(err).ToNot(HaveOccurred())
})
})
Context("[rfe_id:3106][crit:medium][vendor:[email protected]][level:component]With VirtualMachineInstance with two PVCs", func() {
BeforeEach(func() {
// Setup second PVC to use in this context
tests.CreateHostPathPv(tests.CustomHostPath, tests.HostPathCustom)
tests.CreateHostPathPVC(tests.CustomHostPath, "1Gi")
}, 120)
// Not a candidate for testing on NFS because the VMI is restarted and NFS PVC can't be re-used
It("[test_id:3138]should start vmi multiple times", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()
vmi = tests.NewRandomVMIWithPVC(tests.DiskAlpineHostPath)
tests.AddPVCDisk(vmi, "disk1", "virtio", tests.DiskCustomHostPath)
num := 3
By("Starting and stopping the VirtualMachineInstance number of times")
for i := 1; i <= num; i++ {
obj := tests.RunVMIAndExpectLaunch(vmi, 120)
// Verify console on last iteration to verify the VirtualMachineInstance is still booting properly
// after being restarted multiple times
if i == num {
By("Checking that the second disk is present")
expecter, err := tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
_, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "blockdev --getsize64 /dev/vdb\n"},
&expect.BExp{R: "67108864"},
}, 200*time.Second)
Expect(err).ToNot(HaveOccurred())
}
err = virtClient.VirtualMachineInstance(vmi.Namespace).Delete(vmi.Name, &metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
tests.WaitForVirtualMachineToDisappearWithTimeout(obj, 120)
}
})
})
Context("[rfe_id:2298][crit:medium][vendor:[email protected]][level:component] With HostDisk and PVC initialization", func() {
Context("With a HostDisk defined", func() {
hostDiskDir := tests.RandTmpDir()
var nodeName string
var cfgMap *k8sv1.ConfigMap
var originalFeatureGates string
BeforeEach(func() {
nodeName = ""
cfgMap, err = virtClient.CoreV1().ConfigMaps(tests.KubeVirtInstallNamespace).Get(kubevirtConfig, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
originalFeatureGates = cfgMap.Data[virtconfig.FeatureGatesKey]
tests.EnableFeatureGate(virtconfig.HostDiskGate)
})
AfterEach(func() {
tests.UpdateClusterConfigValueAndWait(virtconfig.FeatureGatesKey, originalFeatureGates)
// Delete all VMIs and wait until they disappear to ensure that no disk is in use and that we can delete the whole folder
Expect(virtClient.RestClient().Delete().Namespace(tests.NamespaceTestDefault).Resource("virtualmachineinstances").Do().Error()).ToNot(HaveOccurred())
Eventually(func() int {
vmis, err := virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).List(&metav1.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(vmis.Items)
}, 120, 1).Should(BeZero())
if nodeName != "" {
tests.RemoveHostDiskImage(hostDiskDir, nodeName)
}
})
Context("Without the HostDisk feature gate enabled", func() {
BeforeEach(func() {
tests.DisableFeatureGate(virtconfig.HostDiskGate)
})
It("Should fail to start a VMI", func() {
diskName := "disk-" + uuid.NewRandom().String() + ".img"
diskPath := filepath.Join(hostDiskDir, diskName)
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExistsOrCreate, "")
virtClient, err := kubecli.GetKubevirtClient()
Expect(err).ToNot(HaveOccurred())
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("HostDisk feature gate is not enabled"))
})
})
Context("With 'DiskExistsOrCreate' type", func() {
diskName := "disk-" + uuid.NewRandom().String() + ".img"
diskPath := filepath.Join(hostDiskDir, diskName)
// Not a candidate for NFS testing due to usage of host disk
table.DescribeTable("Should create a disk image and start", func(driver string) {
By("Starting VirtualMachineInstance")
// do not choose a specific node to run the test
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExistsOrCreate, "")
vmi.Spec.Domain.Devices.Disks[0].DiskDevice.Disk.Bus = driver
tests.RunVMIAndExpectLaunch(vmi, 30)
By("Checking if disk.img has been created")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
nodeName = vmiPod.Spec.NodeName
output, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
vmiPod.Spec.Containers[0].Name,
[]string{"find", hostdisk.GetMountedHostDiskDir("host-disk"), "-name", diskName, "-size", "1G", "-o", "-size", "+1G"},
)
Expect(err).ToNot(HaveOccurred())
Expect(output).To(ContainSubstring(hostdisk.GetMountedHostDiskPath("host-disk", diskPath)))
},
table.Entry("[test_id:851]with virtio driver", "virtio"),
table.Entry("[test_id:3057]with sata driver", "sata"),
)
// Not a candidate for NFS testing due to usage of host disk
It("[test_id:3107]should start with multiple hostdisks in the same directory", func() {
By("Starting VirtualMachineInstance")
// do not choose a specific node to run the test
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExistsOrCreate, "")
tests.AddHostDisk(vmi, filepath.Join(hostDiskDir, "another.img"), v1.HostDiskExistsOrCreate, "anotherdisk")
tests.RunVMIAndExpectLaunch(vmi, 30)
By("Checking if another.img has been created")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
nodeName = vmiPod.Spec.NodeName
output, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
vmiPod.Spec.Containers[0].Name,
[]string{"find", hostdisk.GetMountedHostDiskDir("anotherdisk"), "-size", "1G", "-o", "-size", "+1G"},
)
Expect(err).ToNot(HaveOccurred())
Expect(output).To(ContainSubstring(hostdisk.GetMountedHostDiskPath("anotherdisk", filepath.Join(hostDiskDir, "another.img"))))
By("Checking if disk.img has been created")
output, err = tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
vmiPod.Spec.Containers[0].Name,
[]string{"find", hostdisk.GetMountedHostDiskDir("host-disk"), "-size", "1G", "-o", "-size", "+1G"},
)
Expect(err).ToNot(HaveOccurred())
Expect(output).To(ContainSubstring(hostdisk.GetMountedHostDiskPath("host-disk", diskPath)))
})
})
Context("With 'DiskExists' type", func() {
diskName := "disk-" + uuid.NewRandom().String() + ".img"
diskPath := filepath.Join(hostDiskDir, diskName)
            // the pod that creates the disk image must run on the
            // same node as the HostDisk VMI
BeforeEach(func() {
// create a disk image before test
job := tests.CreateHostDiskImage(diskPath)
job, err = virtClient.CoreV1().Pods(tests.NamespaceTestDefault).Create(job)
Expect(err).ToNot(HaveOccurred())
getStatus := func() k8sv1.PodPhase {
pod, err := virtClient.CoreV1().Pods(tests.NamespaceTestDefault).Get(job.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
if pod.Spec.NodeName != "" && nodeName == "" {
nodeName = pod.Spec.NodeName
}
return pod.Status.Phase
}
Eventually(getStatus, 30, 1).Should(Equal(k8sv1.PodSucceeded))
})
// Not a candidate for NFS testing due to usage of host disk
It("[test_id:2306]Should use existing disk image and start", func() {
By("Starting VirtualMachineInstance")
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExists, nodeName)
tests.RunVMIAndExpectLaunch(vmi, 30)
By("Checking if disk.img exists")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
output, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
vmiPod.Spec.Containers[0].Name,
[]string{"find", hostdisk.GetMountedHostDiskDir("host-disk"), "-name", diskName},
)
Expect(err).ToNot(HaveOccurred())
Expect(output).To(ContainSubstring(diskName))
})
// Not a candidate for NFS testing due to usage of host disk
It("[test_id:847]Should fail with a capacity option", func() {
By("Starting VirtualMachineInstance")
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExists, nodeName)
for i, volume := range vmi.Spec.Volumes {
if volume.HostDisk != nil {
vmi.Spec.Volumes[i].HostDisk.Capacity = resource.MustParse("1Gi")
break
}
}
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
})
})
Context("With unknown hostDisk type", func() {
// Not a candidate for NFS testing due to usage of host disk
It("[test_id:852]Should fail to start VMI", func() {
By("Starting VirtualMachineInstance")
vmi = tests.NewRandomVMIWithHostDisk("/data/unknown.img", "unknown", "")
_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmi)
Expect(err).To(HaveOccurred())
})
})
})
Context("With multiple empty PVCs", func() {
var pvcs = [...]string{"empty-pvc1", "empty-pvc2", "empty-pvc3"}
BeforeEach(func() {
for _, pvc := range pvcs {
tests.CreateHostPathPv(pvc, filepath.Join(tests.HostPathBase, pvc))
tests.CreateHostPathPVC(pvc, "1G")
}
}, 120)
AfterEach(func() {
for _, pvc := range pvcs {
tests.DeletePVC(pvc)
tests.DeletePV(pvc)
}
}, 120)
// Not a candidate for NFS testing because multiple VMIs are started
It("[test_id:868]Should initialize an empty PVC by creating a disk.img", func() {
for _, pvc := range pvcs {
By("starting VirtualMachineInstance")
vmi = tests.NewRandomVMIWithPVC("disk-" + pvc)
tests.RunVMIAndExpectLaunch(vmi, 90)
By("Checking if disk.img exists")
vmiPod := tests.GetRunningPodByVirtualMachineInstance(vmi, tests.NamespaceTestDefault)
output, err := tests.ExecuteCommandOnPod(
virtClient,
vmiPod,
vmiPod.Spec.Containers[0].Name,
[]string{"find", "/var/run/kubevirt-private/vmi-disks/disk0/", "-name", "disk.img", "-size", "1G", "-o", "-size", "+1G"},
)
Expect(err).ToNot(HaveOccurred())
By("Checking if a disk image for PVC has been created")
Expect(strings.Contains(output, "disk.img")).To(BeTrue())
}
})
})
Context("With smaller than requested PVCs", func() {
var mountDir string
var diskPath string
var pod *k8sv1.Pod
var diskSize int
var cfgMap *k8sv1.ConfigMap
var originalFeatureGates string
BeforeEach(func() {
By("Enabling the HostDisk feature gate")
cfgMap, err = virtClient.CoreV1().ConfigMaps(tests.KubeVirtInstallNamespace).Get(kubevirtConfig, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
originalFeatureGates = cfgMap.Data[virtconfig.FeatureGatesKey]
tests.EnableFeatureGate(virtconfig.HostDiskGate)
By("Creating a hostPath pod which prepares a mounted directory which goes away when the pod dies")
tmpDir := tests.RandTmpDir()
mountDir = filepath.Join(tmpDir, "mount")
diskPath = filepath.Join(mountDir, "disk.img")
srcDir := filepath.Join(tmpDir, "src")
cmd := "mkdir -p " + mountDir + " && mkdir -p " + srcDir + " && chcon -t container_file_t " + srcDir + " && mount --bind " + srcDir + " " + mountDir + " && while true; do sleep 1; done"
pod = tests.RenderHostPathJob("host-path-preparator", tmpDir, k8sv1.HostPathDirectoryOrCreate, k8sv1.MountPropagationBidirectional, []string{"/usr/bin/bash", "-c"}, []string{cmd})
pod.Spec.Containers[0].Lifecycle = &k8sv1.Lifecycle{
PreStop: &k8sv1.Handler{
Exec: &k8sv1.ExecAction{
Command: []string{"umount", mountDir},
},
},
}
pod, err = virtClient.CoreV1().Pods(tests.NamespaceTestDefault).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Waiting for hostPath pod to prepare the mounted directory")
Eventually(func() k8sv1.ConditionStatus {
p, err := virtClient.CoreV1().Pods(tests.NamespaceTestDefault).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
for _, c := range p.Status.Conditions {
if c.Type == k8sv1.PodReady {
return c.Status
}
}
return k8sv1.ConditionFalse
}, 30, 1).Should(Equal(k8sv1.ConditionTrue))
By("Determining the size of the mounted directory")
diskSizeStr, _, err := tests.ExecuteCommandOnPodV2(virtClient, pod, pod.Spec.Containers[0].Name, []string{"/usr/bin/bash", "-c", fmt.Sprintf("df %s | tail -n 1 | awk '{print $4}'", mountDir)})
Expect(err).ToNot(HaveOccurred())
diskSize, err = strconv.Atoi(strings.TrimSpace(diskSizeStr))
diskSize = diskSize * 1000 // byte to kilobyte
Expect(err).ToNot(HaveOccurred())
})
AfterEach(func() {
tests.UpdateClusterConfigValueAndWait(virtconfig.FeatureGatesKey, originalFeatureGates)
})
configureToleration := func(toleration int) {
By("By configuring toleration")
tests.UpdateClusterConfigValueAndWait(virtconfig.LessPVCSpaceTolerationKey, strconv.Itoa(toleration))
}
// Not a candidate for NFS test due to usage of host disk
It("[test_id:3108]Should not initialize an empty PVC with a disk.img when disk is too small even with toleration", func() {
configureToleration(10)
By("starting VirtualMachineInstance")
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExistsOrCreate, pod.Spec.NodeName)
vmi.Spec.Volumes[0].HostDisk.Capacity = resource.MustParse(strconv.Itoa(int(float64(diskSize) * 1.2)))
tests.RunVMI(vmi, 30)
By("Checking events")
objectEventWatcher := tests.NewObjectEventWatcher(vmi).SinceWatchedObjectResourceVersion().Timeout(time.Duration(120) * time.Second)
stopChan := make(chan struct{})
defer close(stopChan)
objectEventWatcher.WaitFor(stopChan, tests.WarningEvent, v1.SyncFailed.String())
})
// Not a candidate for NFS test due to usage of host disk
It("[test_id:3109]Should initialize an empty PVC with a disk.img when disk is too small but within toleration", func() {
configureToleration(30)
By("starting VirtualMachineInstance")
vmi = tests.NewRandomVMIWithHostDisk(diskPath, v1.HostDiskExistsOrCreate, pod.Spec.NodeName)
vmi.Spec.Volumes[0].HostDisk.Capacity = resource.MustParse(strconv.Itoa(int(float64(diskSize) * 1.2)))
tests.RunVMIAndExpectLaunch(vmi, 30)
By("Checking events")
objectEventWatcher := tests.NewObjectEventWatcher(vmi).SinceWatchedObjectResourceVersion().Timeout(time.Duration(30) * time.Second)
objectEventWatcher.FailOnWarnings()
stopChan := make(chan struct{})
defer close(stopChan)
objectEventWatcher.WaitFor(stopChan, tests.EventType(hostdisk.EventTypeToleratedSmallPV), hostdisk.EventReasonToleratedSmallPV)
})
})
})
Context("[rfe_id:2288][crit:high][vendor:[email protected]][level:component] With Cirros BlockMode PVC", func() {
BeforeEach(func() {
// create a new PV and PVC (PVs can't be reused)
tests.CreateBlockVolumePvAndPvc("1Gi")
})
// Not a candidate for NFS because local volumes are used in test
It("[test_id:1015] should be successfully started", func() {
tests.SkipPVCTestIfRunnigOnKindInfra()
// Start the VirtualMachineInstance with the PVC attached
vmi = tests.NewRandomVMIWithPVC(tests.BlockDiskForTest)
// Without userdata the hostname isn't set correctly and the login expecter fails...
tests.AddUserData(vmi, "cloud-init", "#!/bin/bash\necho 'hello'\n")
tests.RunVMIAndExpectLaunch(vmi, 90)
By("Checking that the VirtualMachineInstance console has expected output")
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred(), "Cirros login successfully")
expecter.Close()
})
})
Context("[rfe_id:2288][crit:high][vendor:[email protected]][level:component]With Alpine ISCSI PVC", func() {
pvName := "test-iscsi-lun" + rand.String(48)
BeforeEach(func() {
tests.SkipIfVersionAboveOrEqual("re-enable this once https://github.com/kubevirt/kubevirt/issues/2272 is fixed", "1.13.3")
// Start a ISCSI POD and service
By("Creating a ISCSI POD")
iscsiTargetIP := tests.CreateISCSITargetPOD(tests.ContainerDiskAlpine)
tests.CreateISCSIPvAndPvc(pvName, "1Gi", iscsiTargetIP, k8sv1.ReadWriteMany, k8sv1.PersistentVolumeBlock)
})
AfterEach(func() {
// create a new PV and PVC (PVs can't be reused)
tests.DeletePvAndPvc(pvName)
})
// Not a candidate for NFS because these tests exercise ISCSI
It("[test_id:3139]should be successfully started", func() {
By("Create a VMIWithPVC")
// Start the VirtualMachineInstance with the PVC attached
vmi = tests.NewRandomVMIWithPVC(pvName)
By("Launching a VMI with PVC ")
tests.RunVMIAndExpectLaunch(vmi, 180)
By("Checking that the VirtualMachineInstance console has expected output")
expecter, err := tests.LoggedInAlpineExpecter(vmi)
Expect(err).ToNot(HaveOccurred(), "Alpine login successfully")
expecter.Close()
})
})
Context("[rfe_id:2288][crit:high][vendor:[email protected]][level:component] With not existing PVC", func() {
// Not a candidate for NFS because the PVC in question doesn't actually exist
It("[test_id:1040] should get unschedulable condition", func() {
// Start the VirtualMachineInstance
pvcName := "nonExistingPVC"
vmi = tests.NewRandomVMIWithPVC(pvcName)
tests.RunVMI(vmi, 10)
virtClient, err := kubecli.GetKubevirtClient()
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool {
vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.Name, &metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
if vmi.Status.Phase != v1.Pending {
return false
}
if len(vmi.Status.Conditions) == 0 {
return false
}
expectPodScheduledCondition := func(vmi *v1.VirtualMachineInstance) {
getType := func(c v1.VirtualMachineInstanceCondition) string { return string(c.Type) }
getReason := func(c v1.VirtualMachineInstanceCondition) string { return c.Reason }
getStatus := func(c v1.VirtualMachineInstanceCondition) k8sv1.ConditionStatus { return c.Status }
getMessage := func(c v1.VirtualMachineInstanceCondition) string { return c.Message }
Expect(vmi.Status.Conditions).To(
ContainElement(
And(
WithTransform(getType, Equal(string(k8sv1.PodScheduled))),
WithTransform(getReason, Equal(k8sv1.PodReasonUnschedulable)),
WithTransform(getStatus, Equal(k8sv1.ConditionFalse)),
WithTransform(getMessage, Equal(fmt.Sprintf("failed to render launch manifest: didn't find PVC %v", pvcName))),
),
),
)
}
expectPodScheduledCondition(vmi)
return true
}, time.Duration(10)*time.Second).Should(BeTrue(), "Timed out waiting for VMI to get Unschedulable condition")
})
})
})
}) | Name: "emptydisk1",
Serial: diskSerial,
DiskDevice: v1.DiskDevice{ |
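The emptyDisk test earlier in this file asserts that a "2Gi" disk reports 2147483648 bytes from blockdev. A small Python sketch of the binary-suffix arithmetic that assertion relies on, covering only the Ki/Mi/Gi/Ti suffixes of Kubernetes resource quantities:

BINARY_SUFFIXES = {'Ki': 1024, 'Mi': 1024**2, 'Gi': 1024**3, 'Ti': 1024**4}

def quantity_to_bytes(q):
    for suffix, factor in BINARY_SUFFIXES.items():
        if q.endswith(suffix):
            return int(q[:-len(suffix)]) * factor
    return int(q)  # bare integers are already bytes

assert quantity_to_bytes('2Gi') == 2147483648  # the value the test expects
assert quantity_to_bytes('1Gi') == 1073741824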
students.controller.ts | import { Controller, Get, Post, Put, Delete, Param, Body } from '@nestjs/common';
import { Student } from './student.interface';
import { StudentsService } from './students.service';
import { StudentDTO } from './student.dto'
@Controller('students')
export class | {
constructor(private readonly studentsService: StudentsService) {
}
@Get()
findAll(): Promise<Student[]> {
return this.studentsService.findAll()
}
@Get(':id')
find(@Param('id') id: string) {
return this.studentsService.find(id)
}
@Post()
create(@Body() student: StudentDTO): Promise<Student> {
return this.studentsService.create(student)
}
@Put(':id')
update(@Param('id') id: string, @Body() student: StudentDTO): Promise<Student> {
return this.studentsService.update(id, student as Student)
}
@Delete(':id')
delete(@Param('id') id: string): Promise<Student> {
return this.studentsService.delete(id)
}
}
| StudentsController |
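The controller above delegates every route to StudentsService, whose implementation is not part of this record. A language-neutral sketch of the interface shape it implies, written as an in-memory CRUD store keyed by id; this is an illustration only, not the project's actual service:

import uuid

class InMemoryStudentsService:
    def __init__(self):
        self._students = {}

    def find_all(self):
        return list(self._students.values())

    def find(self, student_id):
        return self._students.get(student_id)

    def create(self, student):
        student_id = str(uuid.uuid4())
        record = dict(student, id=student_id)
        self._students[student_id] = record
        return record

    def update(self, student_id, student):
        self._students[student_id] = dict(student, id=student_id)
        return self._students[student_id]

    def delete(self, student_id):
        # Returns the removed record, or None if the id was unknown.
        return self._students.pop(student_id, None)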
Cache.d.ts | import { DataProxy } from './DataProxy';
export declare namespace Cache {
type WatchCallback = (newData: any) => void;
interface EvictionResult {
        success: boolean;
| optimistic: boolean;
}
interface WriteOptions<TResult = any, TVariables = any> extends DataProxy.Query<TVariables> {
dataId: string;
result: TResult;
}
interface DiffOptions extends ReadOptions {
returnPartialData?: boolean;
}
interface WatchOptions extends ReadOptions {
callback: WatchCallback;
}
interface EvictOptions<TVariables = any> extends DataProxy.Query<TVariables> {
rootId?: string;
}
export import DiffResult = DataProxy.DiffResult;
export import WriteQueryOptions = DataProxy.WriteQueryOptions;
export import WriteFragmentOptions = DataProxy.WriteFragmentOptions;
export import WriteDataOptions = DataProxy.WriteDataOptions;
export import Fragment = DataProxy.Fragment;
}
//# sourceMappingURL=Cache.d.ts.map | }
interface ReadOptions<TVariables = any> extends DataProxy.Query<TVariables> {
rootId?: string;
previousResult?: any;
|
test_auto_HistogramMatching.py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..histogrammatching import HistogramMatching
def test_HistogramMatching_inputs(): | usedefault=True,
),
inputVolume=dict(
argstr='%s',
position=-3,
),
numberOfHistogramLevels=dict(argstr='--numberOfHistogramLevels %d', ),
numberOfMatchPoints=dict(argstr='--numberOfMatchPoints %d', ),
outputVolume=dict(
argstr='%s',
hash_files=False,
position=-1,
),
referenceVolume=dict(
argstr='%s',
position=-2,
),
threshold=dict(argstr='--threshold ', ),
)
inputs = HistogramMatching.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_HistogramMatching_outputs():
output_map = dict(outputVolume=dict(position=-1, ), )
outputs = HistogramMatching.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value | input_map = dict(
args=dict(argstr='%s', ),
environ=dict(
nohash=True, |
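These auto-generated tests walk a map of expected trait metadata and assert each (key, metakey, value) triple against the interface spec via getattr. A toy version of the same loop with a hand-rolled spec object, to show the pattern in isolation:

class ToyTrait:
    # Stand-in for a nipype trait: metadata exposed as attributes.
    def __init__(self, **metadata):
        for k, v in metadata.items():
            setattr(self, k, v)

class ToySpec:
    def traits(self):
        return {'threshold': ToyTrait(argstr='--threshold ', position=-1)}

def check_metadata(spec, expected):
    traits = spec.traits()
    for key, metadata in expected.items():
        for metakey, value in metadata.items():
            assert getattr(traits[key], metakey) == value

check_metadata(ToySpec(), {'threshold': dict(argstr='--threshold ')})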
absolute_layout_in_wrap_reverse_row_container_flex_end.rs | pub fn compute() | {
let mut sprawl = sprawl::Sprawl::new();
let node0 = sprawl
.new_node(
sprawl::style::Style {
position_type: sprawl::style::PositionType::Absolute,
align_self: sprawl::style::AlignSelf::FlexEnd,
size: sprawl::geometry::Size {
width: sprawl::style::Dimension::Points(20f32),
height: sprawl::style::Dimension::Points(20f32),
..Default::default()
},
..Default::default()
},
&[],
)
.unwrap();
let node = sprawl
.new_node(
sprawl::style::Style {
flex_wrap: sprawl::style::FlexWrap::WrapReverse,
size: sprawl::geometry::Size {
width: sprawl::style::Dimension::Points(100f32),
height: sprawl::style::Dimension::Points(100f32),
..Default::default()
},
..Default::default()
},
&[node0],
)
.unwrap();
sprawl.compute_layout(node, sprawl::geometry::Size::undefined()).unwrap();
} |
|
simple.rs | use super::{infer, type_at, type_at_pos};
use crate::test_db::TestDB;
use insta::assert_snapshot;
use ra_db::fixture::WithFixture;
#[test]
fn infer_box() {
let (db, pos) = TestDB::with_position(
r#"
//- /main.rs crate:main deps:std
fn test() {
let x = box 1;
let t = (x, box x, box &1, box [1]);
t<|>;
}
//- /std.rs crate:std
#[prelude_import] use prelude::*;
mod prelude {}
mod boxed {
#[lang = "owned_box"]
pub struct Box<T: ?Sized> {
inner: *mut T,
}
}
"#,
);
assert_eq!("(Box<i32>, Box<Box<i32>>, Box<&i32>, Box<[i32;_]>)", type_at_pos(&db, pos));
}
#[test]
fn infer_adt_self() {
let (db, pos) = TestDB::with_position(
r#"
//- /main.rs
enum Nat { Succ(Self), Demo(Nat), Zero }
fn test() {
let foo: Nat = Nat::Zero;
if let Nat::Succ(x) = foo {
x<|>
}
}
"#,
);
assert_eq!("Nat", type_at_pos(&db, pos));
}
#[test]
fn infer_ranges() {
let (db, pos) = TestDB::with_position(
r#"
//- /main.rs crate:main deps:std
fn test() {
let a = ..;
let b = 1..;
let c = ..2u32;
let d = 1..2usize;
let e = ..=10;
let f = 'a'..='z';
let t = (a, b, c, d, e, f);
t<|>;
}
//- /std.rs crate:std
#[prelude_import] use prelude::*;
mod prelude {}
pub mod ops {
pub struct Range<Idx> {
pub start: Idx,
pub end: Idx,
}
pub struct RangeFrom<Idx> {
pub start: Idx,
}
struct RangeFull;
pub struct RangeInclusive<Idx> {
start: Idx,
end: Idx,
is_empty: u8,
}
pub struct RangeTo<Idx> {
pub end: Idx,
}
pub struct RangeToInclusive<Idx> {
pub end: Idx,
}
}
"#,
);
assert_eq!(
"(RangeFull, RangeFrom<i32>, RangeTo<u32>, Range<usize>, RangeToInclusive<i32>, RangeInclusive<char>)",
type_at_pos(&db, pos),
);
}
#[test]
fn infer_while_let() {
let (db, pos) = TestDB::with_position(
r#"
//- /main.rs
enum Option<T> { Some(T), None }
fn test() {
let foo: Option<f32> = None;
while let Option::Some(x) = foo {
<|>x
}
}
"#,
);
assert_eq!("f32", type_at_pos(&db, pos));
}
#[test]
fn infer_basics() {
assert_snapshot!(
infer(r#"
fn test(a: u32, b: isize, c: !, d: &str) {
a;
b;
c;
d;
1usize;
1isize;
"test";
1.0f32;
}"#),
@r###"
[9; 10) 'a': u32
[17; 18) 'b': isize
[27; 28) 'c': !
[33; 34) 'd': &str
[42; 121) '{ ...f32; }': !
[48; 49) 'a': u32
[55; 56) 'b': isize
[62; 63) 'c': !
[69; 70) 'd': &str
[76; 82) '1usize': usize
[88; 94) '1isize': isize
[100; 106) '"test"': &str
[112; 118) '1.0f32': f32
"###
);
}
#[test]
fn infer_let() {
assert_snapshot!(
infer(r#"
fn test() {
let a = 1isize;
let b: usize = 1;
let c = b;
let d: u32;
let e;
let f: i32 = e;
}
"#),
@r###"
[11; 118) '{ ...= e; }': ()
[21; 22) 'a': isize
[25; 31) '1isize': isize
[41; 42) 'b': usize
[52; 53) '1': usize
[63; 64) 'c': usize
[67; 68) 'b': usize
[78; 79) 'd': u32
[94; 95) 'e': i32
[105; 106) 'f': i32
[114; 115) 'e': i32
"###
);
}
#[test]
fn infer_paths() {
assert_snapshot!(
infer(r#"
fn a() -> u32 { 1 }
mod b {
fn c() -> u32 { 1 }
}
fn test() {
a();
b::c();
}
"#),
@r###"
[15; 20) '{ 1 }': u32
[17; 18) '1': u32
[48; 53) '{ 1 }': u32
[50; 51) '1': u32
[67; 91) '{ ...c(); }': ()
[73; 74) 'a': fn a() -> u32
[73; 76) 'a()': u32
[82; 86) 'b::c': fn c() -> u32
[82; 88) 'b::c()': u32
"###
);
}
#[test]
fn infer_path_type() {
assert_snapshot!(
infer(r#"
struct S;
impl S {
fn foo() -> i32 { 1 }
}
fn test() {
S::foo();
<S>::foo();
}
"#),
@r###"
[41; 46) '{ 1 }': i32
[43; 44) '1': i32
[60; 93) '{ ...o(); }': ()
[66; 72) 'S::foo': fn foo() -> i32
[66; 74) 'S::foo()': i32
[80; 88) '<S>::foo': fn foo() -> i32
[80; 90) '<S>::foo()': i32
"###
);
}
#[test]
fn infer_struct() {
assert_snapshot!(
infer(r#"
struct A {
b: B,
c: C,
}
struct B;
struct C(usize);
fn test() {
let c = C(1);
B;
let a: A = A { b: B, c: C(1) };
a.b;
a.c;
}
"#),
@r###"
[72; 154) '{ ...a.c; }': ()
[82; 83) 'c': C
[86; 87) 'C': C(usize) -> C
[86; 90) 'C(1)': C
[88; 89) '1': usize
[96; 97) 'B': B
[107; 108) 'a': A
[114; 133) 'A { b:...C(1) }': A
[121; 122) 'B': B
[127; 128) 'C': C(usize) -> C
[127; 131) 'C(1)': C
[129; 130) '1': usize
[139; 140) 'a': A
[139; 142) 'a.b': B
[148; 149) 'a': A
[148; 151) 'a.c': C
"###
);
}
#[test]
fn infer_enum() {
assert_snapshot!(
infer(r#"
enum E {
V1 { field: u32 },
V2
}
fn test() {
E::V1 { field: 1 };
E::V2;
}"#),
@r###"
[48; 82) '{ E:...:V2; }': ()
[52; 70) 'E::V1 ...d: 1 }': E
[67; 68) '1': u32
[74; 79) 'E::V2': E
"###
);
}
#[test]
fn infer_refs() {
assert_snapshot!(
infer(r#"
fn test(a: &u32, b: &mut u32, c: *const u32, d: *mut u32) {
a;
*a;
&a;
&mut a;
b;
*b;
&b;
c;
*c;
d;
*d;
}
"#),
@r###"
[9; 10) 'a': &u32
[18; 19) 'b': &mut u32
[31; 32) 'c': *const u32
[46; 47) 'd': *mut u32
[59; 150) '{ ... *d; }': ()
[65; 66) 'a': &u32
[72; 74) '*a': u32
[73; 74) 'a': &u32
[80; 82) '&a': &&u32
[81; 82) 'a': &u32
[88; 94) '&mut a': &mut &u32
[93; 94) 'a': &u32
[100; 101) 'b': &mut u32
[107; 109) '*b': u32
[108; 109) 'b': &mut u32
[115; 117) '&b': &&mut u32
[116; 117) 'b': &mut u32
[123; 124) 'c': *const u32
[130; 132) '*c': u32
[131; 132) 'c': *const u32
[138; 139) 'd': *mut u32
[145; 147) '*d': u32
[146; 147) 'd': *mut u32
"###
);
}
#[test]
fn infer_literals() {
assert_snapshot!(
infer(r##"
fn test() {
5i32;
5f32;
5f64;
"hello";
b"bytes";
'c';
b'b';
3.14;
5000;
false;
true;
r#"
//! doc
// non-doc
mod foo {}
"#;
br#"yolo"#;
}
"##),
@r###"
[11; 221) '{ ...o"#; }': ()
[17; 21) '5i32': i32
[27; 31) '5f32': f32
[37; 41) '5f64': f64
[47; 54) '"hello"': &str
[60; 68) 'b"bytes"': &[u8]
[74; 77) ''c'': char
[83; 87) 'b'b'': u8
[93; 97) '3.14': f64
[103; 107) '5000': i32
[113; 118) 'false': bool
[124; 128) 'true': bool
[134; 202) 'r#" ... "#': &str
[208; 218) 'br#"yolo"#': &[u8]
"###
);
}
#[test]
fn infer_unary_op() {
assert_snapshot!(
infer(r#"
enum SomeType {}
fn test(x: SomeType) {
let b = false;
let c = !b;
let a = 100;
let d: i128 = -a;
let e = -100;
let f = !!!true;
let g = !42;
let h = !10u32;
let j = !a;
-3.14;
!3;
-x;
!x;
-"hello";
!"hello";
}
"#),
@r###"
[27; 28) 'x': SomeType
[40; 272) '{ ...lo"; }': ()
[50; 51) 'b': bool
[54; 59) 'false': bool
[69; 70) 'c': bool
[73; 75) '!b': bool
[74; 75) 'b': bool
[85; 86) 'a': i128
[89; 92) '100': i128
[102; 103) 'd': i128
[112; 114) '-a': i128
[113; 114) 'a': i128
[124; 125) 'e': i32
[128; 132) '-100': i32
[129; 132) '100': i32
[142; 143) 'f': bool
[146; 153) '!!!true': bool
[147; 153) '!!true': bool
[148; 153) '!true': bool
[149; 153) 'true': bool
[163; 164) 'g': i32
[167; 170) '!42': i32
[168; 170) '42': i32
[180; 181) 'h': u32
[184; 190) '!10u32': u32
[185; 190) '10u32': u32
[200; 201) 'j': i128
[204; 206) '!a': i128
[205; 206) 'a': i128
[212; 217) '-3.14': f64
[213; 217) '3.14': f64
[223; 225) '!3': i32
[224; 225) '3': i32
[231; 233) '-x': {unknown}
[232; 233) 'x': SomeType
[239; 241) '!x': {unknown}
[240; 241) 'x': SomeType
[247; 255) '-"hello"': {unknown}
[248; 255) '"hello"': &str
[261; 269) '!"hello"': {unknown}
[262; 269) '"hello"': &str
"###
);
}
#[test]
fn infer_backwards() {
assert_snapshot!(
infer(r#"
fn takes_u32(x: u32) {}
struct S { i32_field: i32 }
fn test() -> &mut &f64 {
let a = unknown_function();
takes_u32(a);
let b = unknown_function();
S { i32_field: b };
let c = unknown_function();
&mut &c
}
"#),
@r###"
[14; 15) 'x': u32
[22; 24) '{}': ()
[78; 231) '{ ...t &c }': &mut &f64
[88; 89) 'a': u32
[92; 108) 'unknow...nction': {unknown}
[92; 110) 'unknow...tion()': u32
[116; 125) 'takes_u32': fn takes_u32(u32) -> ()
[116; 128) 'takes_u32(a)': ()
[126; 127) 'a': u32
[138; 139) 'b': i32
[142; 158) 'unknow...nction': {unknown}
[142; 160) 'unknow...tion()': i32
[166; 184) 'S { i3...d: b }': S
[181; 182) 'b': i32
[194; 195) 'c': f64
[198; 214) 'unknow...nction': {unknown}
[198; 216) 'unknow...tion()': f64
[222; 229) '&mut &c': &mut &f64
[227; 229) '&c': &f64
[228; 229) 'c': f64
"###
);
}
#[test]
fn infer_self() {
assert_snapshot!(
infer(r#"
struct S;
impl S {
fn test(&self) {
self;
}
fn test2(self: &Self) {
self;
}
fn test3() -> Self {
S {}
}
fn test4() -> Self {
Self {}
}
}
"#),
@r###"
[34; 38) 'self': &S
[40; 61) '{ ... }': ()
[50; 54) 'self': &S
[75; 79) 'self': &S
[88; 109) '{ ... }': ()
[98; 102) 'self': &S
[133; 153) '{ ... }': S
[143; 147) 'S {}': S
[177; 200) '{ ... }': S
[187; 194) 'Self {}': S
"###
);
}
#[test]
fn infer_binary_op() {
assert_snapshot!(
infer(r#"
fn f(x: bool) -> i32 {
0i32
}
fn test() -> bool {
let x = a && b;
let y = true || false;
let z = x == y;
let t = x != y;
let minus_forty: isize = -40isize;
let h = minus_forty <= CONST_2;
let c = f(z || y) + 5;
let d = b;
let g = minus_forty ^= i;
let ten: usize = 10;
let ten_is_eleven = ten == some_num;
ten < 3
}
"#),
@r###"
[6; 7) 'x': bool
[22; 34) '{ 0i32 }': i32
[28; 32) '0i32': i32
[54; 370) '{ ... < 3 }': bool
[64; 65) 'x': bool
[68; 69) 'a': bool
[68; 74) 'a && b': bool
[73; 74) 'b': bool
[84; 85) 'y': bool
[88; 92) 'true': bool
[88; 101) 'true || false': bool
[96; 101) 'false': bool
[111; 112) 'z': bool
[115; 116) 'x': bool
[115; 121) 'x == y': bool
[120; 121) 'y': bool
[131; 132) 't': bool
[135; 136) 'x': bool
[135; 141) 'x != y': bool
[140; 141) 'y': bool
[151; 162) 'minus_forty': isize
[172; 180) '-40isize': isize
[173; 180) '40isize': isize
[190; 191) 'h': bool
[194; 205) 'minus_forty': isize
[194; 216) 'minus_...ONST_2': bool
[209; 216) 'CONST_2': isize
[226; 227) 'c': i32
[230; 231) 'f': fn f(bool) -> i32
[230; 239) 'f(z || y)': i32
[230; 243) 'f(z || y) + 5': i32
[232; 233) 'z': bool
[232; 238) 'z || y': bool
[237; 238) 'y': bool
[242; 243) '5': i32
[253; 254) 'd': {unknown}
[257; 258) 'b': {unknown}
[268; 269) 'g': ()
[272; 283) 'minus_forty': isize
[272; 288) 'minus_...y ^= i': ()
[287; 288) 'i': isize
[298; 301) 'ten': usize
[311; 313) '10': usize
[323; 336) 'ten_is_eleven': bool
[339; 342) 'ten': usize
[339; 354) 'ten == some_num': bool
[346; 354) 'some_num': usize
[361; 364) 'ten': usize
[361; 368) 'ten < 3': bool
[367; 368) '3': usize
"###
);
}
#[test]
fn infer_field_autoderef() {
assert_snapshot!(
infer(r#"
struct A {
b: B,
}
struct B;
fn test1(a: A) {
let a1 = a;
a1.b;
let a2 = &a;
a2.b;
let a3 = &mut a;
a3.b;
let a4 = &&&&&&&a;
a4.b;
let a5 = &mut &&mut &&mut a;
a5.b;
}
fn test2(a1: *const A, a2: *mut A) {
a1.b;
a2.b;
}
"#),
@r###"
[44; 45) 'a': A
[50; 213) '{ ...5.b; }': ()
[60; 62) 'a1': A
[65; 66) 'a': A
[72; 74) 'a1': A
[72; 76) 'a1.b': B
[86; 88) 'a2': &A
[91; 93) '&a': &A
[92; 93) 'a': A
[99; 101) 'a2': &A
[99; 103) 'a2.b': B
[113; 115) 'a3': &mut A
[118; 124) '&mut a': &mut A
[123; 124) 'a': A
[130; 132) 'a3': &mut A
[130; 134) 'a3.b': B
[144; 146) 'a4': &&&&&&&A
[149; 157) '&&&&&&&a': &&&&&&&A
[150; 157) '&&&&&&a': &&&&&&A
[151; 157) '&&&&&a': &&&&&A
[152; 157) '&&&&a': &&&&A
[153; 157) '&&&a': &&&A
[154; 157) '&&a': &&A
[155; 157) '&a': &A
[156; 157) 'a': A
[163; 165) 'a4': &&&&&&&A
[163; 167) 'a4.b': B
[177; 179) 'a5': &mut &&mut &&mut A
[182; 200) '&mut &...&mut a': &mut &&mut &&mut A
[187; 200) '&&mut &&mut a': &&mut &&mut A
[188; 200) '&mut &&mut a': &mut &&mut A
[193; 200) '&&mut a': &&mut A
[194; 200) '&mut a': &mut A
[199; 200) 'a': A
[206; 208) 'a5': &mut &&mut &&mut A
[206; 210) 'a5.b': B
[224; 226) 'a1': *const A
[238; 240) 'a2': *mut A
[250; 273) '{ ...2.b; }': ()
[256; 258) 'a1': *const A
[256; 260) 'a1.b': B
[266; 268) 'a2': *mut A
[266; 270) 'a2.b': B
"###
);
}
#[test]
fn infer_argument_autoderef() {
assert_snapshot!(
infer(r#"
#[lang = "deref"]
pub trait Deref {
type Target;
fn deref(&self) -> &Self::Target;
}
struct A<T>(T);
impl<T> A<T> {
fn foo(&self) -> &T {
&self.0
}
}
struct B<T>(T);
impl<T> Deref for B<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
fn test() {
let t = A::foo(&&B(B(A(42))));
}
"#),
@r###"
[68; 72) 'self': &Self
[139; 143) 'self': &A<T>
[151; 174) '{ ... }': &T
[161; 168) '&self.0': &T
[162; 166) 'self': &A<T>
[162; 168) 'self.0': T
[255; 259) 'self': &B<T>
[278; 301) '{ ... }': &T
[288; 295) '&self.0': &T
[289; 293) 'self': &B<T>
[289; 295) 'self.0': T
[315; 353) '{ ...))); }': ()
[325; 326) 't': &i32
[329; 335) 'A::foo': fn foo<i32>(&A<T>) -> &T
[329; 350) 'A::foo...42))))': &i32
[336; 349) '&&B(B(A(42)))': &&B<B<A<i32>>>
[337; 349) '&B(B(A(42)))': &B<B<A<i32>>>
[338; 339) 'B': B<B<A<i32>>>(T) -> B<T>
[338; 349) 'B(B(A(42)))': B<B<A<i32>>>
[340; 341) 'B': B<A<i32>>(T) -> B<T>
[340; 348) 'B(A(42))': B<A<i32>>
[342; 343) 'A': A<i32>(T) -> A<T>
[342; 347) 'A(42)': A<i32>
[344; 346) '42': i32
"###
);
}
#[test]
fn infer_method_argument_autoderef() {
assert_snapshot!(
infer(r#"
#[lang = "deref"]
pub trait Deref {
type Target;
fn deref(&self) -> &Self::Target;
}
struct A<T>(*mut T);
impl<T> A<T> {
fn foo(&self, x: &A<T>) -> &T {
&*x.0
}
}
struct B<T>(T);
impl<T> Deref for B<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
fn test(a: A<i32>) {
let t = A(0 as *mut _).foo(&&B(B(a)));
}
"#),
@r###"
[68; 72) 'self': &Self
[144; 148) 'self': &A<T>
[150; 151) 'x': &A<T>
[166; 187) '{ ... }': &T
[176; 181) '&*x.0': &T
[177; 181) '*x.0': T
[178; 179) 'x': &A<T>
[178; 181) 'x.0': *mut T
[268; 272) 'self': &B<T>
[291; 314) '{ ... }': &T
[301; 308) '&self.0': &T
[302; 306) 'self': &B<T>
[302; 308) 'self.0': T
[326; 327) 'a': A<i32>
[337; 383) '{ ...))); }': ()
[347; 348) 't': &i32
[351; 352) 'A': A<i32>(*mut T) -> A<T>
[351; 365) 'A(0 as *mut _)': A<i32>
[351; 380) 'A(0 as...B(a)))': &i32
[353; 354) '0': i32
[353; 364) '0 as *mut _': *mut i32
[370; 379) '&&B(B(a))': &&B<B<A<i32>>>
[371; 379) '&B(B(a))': &B<B<A<i32>>>
[372; 373) 'B': B<B<A<i32>>>(T) -> B<T>
[372; 379) 'B(B(a))': B<B<A<i32>>>
[374; 375) 'B': B<A<i32>>(T) -> B<T>
[374; 378) 'B(a)': B<A<i32>>
[376; 377) 'a': A<i32>
"###
);
}
#[test]
fn infer_in_elseif() {
assert_snapshot!(
infer(r#"
struct Foo { field: i32 }
fn main(foo: Foo) {
if true {
} else if false {
foo.field
}
}
"#),
@r###"
[35; 38) 'foo': Foo
[45; 109) '{ ... } }': ()
[51; 107) 'if tru... }': ()
[54; 58) 'true': bool
[59; 67) '{ }': ()
[73; 107) 'if fal... }': ()
[76; 81) 'false': bool
[82; 107) '{ ... }': i32
[92; 95) 'foo': Foo
[92; 101) 'foo.field': i32
"###
)
}
#[test]
fn infer_if_match_with_return() {
assert_snapshot!(
infer(r#"
fn foo() {
let _x1 = if true {
1
} else {
return;
};
let _x2 = if true {
2
} else {
return
};
let _x3 = match true {
true => 3,
_ => {
return;
}
};
let _x4 = match true {
true => 4,
_ => return
};
}"#),
@r###"
[10; 323) '{ ... }; }': ()
[20; 23) '_x1': i32
[26; 80) 'if tru... }': i32
[29; 33) 'true': bool
[34; 51) '{ ... }': i32
[44; 45) '1': i32
[57; 80) '{ ... }': !
[67; 73) 'return': !
[90; 93) '_x2': i32
[96; 149) 'if tru... }': i32
[99; 103) 'true': bool
[104; 121) '{ ... }': i32
[114; 115) '2': i32
[127; 149) '{ ... }': !
[137; 143) 'return': !
[159; 162) '_x3': i32
[165; 247) 'match ... }': i32
[171; 175) 'true': bool
[186; 190) 'true': bool
[194; 195) '3': i32
[205; 206) '_': bool
[210; 241) '{ ... }': !
[224; 230) 'return': !
[257; 260) '_x4': i32
[263; 320) 'match ... }': i32
[269; 273) 'true': bool
[284; 288) 'true': bool
[292; 293) '4': i32
[303; 304) '_': bool
[308; 314) 'return': !
"###
)
}
#[test]
fn infer_inherent_method() {
assert_snapshot!(
infer(r#"
struct A;
impl A {
fn foo(self, x: u32) -> i32 {}
}
mod b {
impl super::A {
fn bar(&self, x: u64) -> i64 {}
}
}
fn test(a: A) {
a.foo(1);
(&a).bar(1);
a.bar(1);
}
"#),
@r###"
[32; 36) 'self': A
[38; 39) 'x': u32
[53; 55) '{}': ()
[103; 107) 'self': &A
[109; 110) 'x': u64
[124; 126) '{}': ()
[144; 145) 'a': A
[150; 198) '{ ...(1); }': ()
[156; 157) 'a': A
[156; 164) 'a.foo(1)': i32
[162; 163) '1': u32
[170; 181) '(&a).bar(1)': i64
[171; 173) '&a': &A
[172; 173) 'a': A
[179; 180) '1': u64
[187; 188) 'a': A
[187; 195) 'a.bar(1)': i64
[193; 194) '1': u64
"###
);
}
#[test]
fn infer_inherent_method_str() {
assert_snapshot!(
infer(r#"
#[lang = "str"]
impl str {
fn foo(&self) -> i32 {}
}
fn test() {
"foo".foo();
}
"#),
@r###"
[40; 44) 'self': &str
[53; 55) '{}': ()
[69; 89) '{ ...o(); }': ()
[75; 80) '"foo"': &str
[75; 86) '"foo".foo()': i32
"###
);
}
#[test]
fn infer_tuple() {
assert_snapshot!(
infer(r#"
fn test(x: &str, y: isize) {
let a: (u32, &str) = (1, "a");
let b = (a, x);
let c = (y, x);
let d = (c, x);
let e = (1, "e");
let f = (e, "d");
}
"#),
@r###"
[9; 10) 'x': &str
[18; 19) 'y': isize
[28; 170) '{ ...d"); }': ()
[38; 39) 'a': (u32, &str)
[55; 63) '(1, "a")': (u32, &str)
[56; 57) '1': u32
[59; 62) '"a"': &str
[73; 74) 'b': ((u32, &str), &str)
[77; 83) '(a, x)': ((u32, &str), &str)
[78; 79) 'a': (u32, &str)
[81; 82) 'x': &str
[93; 94) 'c': (isize, &str)
[97; 103) '(y, x)': (isize, &str)
[98; 99) 'y': isize
[101; 102) 'x': &str
[113; 114) 'd': ((isize, &str), &str)
[117; 123) '(c, x)': ((isize, &str), &str)
[118; 119) 'c': (isize, &str)
[121; 122) 'x': &str
[133; 134) 'e': (i32, &str)
[137; 145) '(1, "e")': (i32, &str)
[138; 139) '1': i32
[141; 144) '"e"': &str
[155; 156) 'f': ((i32, &str), &str)
[159; 167) '(e, "d")': ((i32, &str), &str)
[160; 161) 'e': (i32, &str)
[163; 166) '"d"': &str
"###
);
}
#[test]
fn infer_array() {
assert_snapshot!(
infer(r#"
fn test(x: &str, y: isize) {
let a = [x];
let b = [a, a];
let c = [b, b];
let d = [y, 1, 2, 3];
let d = [1, y, 2, 3];
let e = [y];
let f = [d, d];
let g = [e, e];
let h = [1, 2];
let i = ["a", "b"];
let b = [a, ["b"]];
let x: [u8; 0] = [];
}
"#),
@r###"
[9; 10) 'x': &str
[18; 19) 'y': isize
[28; 293) '{ ... []; }': ()
[38; 39) 'a': [&str;_]
[42; 45) '[x]': [&str;_]
[43; 44) 'x': &str
[55; 56) 'b': [[&str;_];_]
[59; 65) '[a, a]': [[&str;_];_]
[60; 61) 'a': [&str;_]
[63; 64) 'a': [&str;_]
[75; 76) 'c': [[[&str;_];_];_]
[79; 85) '[b, b]': [[[&str;_];_];_]
[80; 81) 'b': [[&str;_];_]
[83; 84) 'b': [[&str;_];_]
[96; 97) 'd': [isize;_]
[100; 112) '[y, 1, 2, 3]': [isize;_]
[101; 102) 'y': isize
[104; 105) '1': isize
[107; 108) '2': isize
[110; 111) '3': isize
[122; 123) 'd': [isize;_]
[126; 138) '[1, y, 2, 3]': [isize;_]
[127; 128) '1': isize
[130; 131) 'y': isize
[133; 134) '2': isize
[136; 137) '3': isize
[148; 149) 'e': [isize;_]
[152; 155) '[y]': [isize;_]
[153; 154) 'y': isize
[165; 166) 'f': [[isize;_];_]
[169; 175) '[d, d]': [[isize;_];_]
[170; 171) 'd': [isize;_]
[173; 174) 'd': [isize;_]
[185; 186) 'g': [[isize;_];_]
[189; 195) '[e, e]': [[isize;_];_]
[190; 191) 'e': [isize;_]
[193; 194) 'e': [isize;_]
[206; 207) 'h': [i32;_]
[210; 216) '[1, 2]': [i32;_]
[211; 212) '1': i32
[214; 215) '2': i32
[226; 227) 'i': [&str;_]
[230; 240) '["a", "b"]': [&str;_]
[231; 234) '"a"': &str
[236; 239) '"b"': &str
[251; 252) 'b': [[&str;_];_]
[255; 265) '[a, ["b"]]': [[&str;_];_]
[256; 257) 'a': [&str;_]
[259; 264) '["b"]': [&str;_]
[260; 263) '"b"': &str
[275; 276) 'x': [u8;_]
[288; 290) '[]': [u8;_]
"###
);
}
#[test]
fn infer_struct_generics() {
assert_snapshot!(
infer(r#"
struct A<T> {
x: T,
}
fn test(a1: A<u32>, i: i32) {
a1.x;
let a2 = A { x: i };
a2.x;
let a3 = A::<i128> { x: 1 };
a3.x;
}
"#),
@r###"
[36; 38) 'a1': A<u32>
[48; 49) 'i': i32
[56; 147) '{ ...3.x; }': ()
[62; 64) 'a1': A<u32>
[62; 66) 'a1.x': u32
[76; 78) 'a2': A<i32>
[81; 91) 'A { x: i }': A<i32>
[88; 89) 'i': i32
[97; 99) 'a2': A<i32>
[97; 101) 'a2.x': i32
[111; 113) 'a3': A<i128>
[116; 134) 'A::<i1...x: 1 }': A<i128>
[131; 132) '1': i128
[140; 142) 'a3': A<i128>
[140; 144) 'a3.x': i128
"###
);
}
#[test]
fn infer_tuple_struct_generics() {
assert_snapshot!(
infer(r#"
struct A<T>(T);
enum Option<T> { Some(T), None }
use Option::*;
fn test() {
A(42);
A(42u128);
Some("x");
Option::Some("x");
None;
let x: Option<i64> = None;
}
"#),
@r###"
[76; 184) '{ ...one; }': ()
[82; 83) 'A': A<i32>(T) -> A<T>
[82; 87) 'A(42)': A<i32>
[84; 86) '42': i32
[93; 94) 'A': A<u128>(T) -> A<T>
[93; 102) 'A(42u128)': A<u128>
[95; 101) '42u128': u128
[108; 112) 'Some': Some<&str>(T) -> Option<T>
[108; 117) 'Some("x")': Option<&str>
[113; 116) '"x"': &str
[123; 135) 'Option::Some': Some<&str>(T) -> Option<T>
[123; 140) 'Option...e("x")': Option<&str>
[136; 139) '"x"': &str
[146; 150) 'None': Option<{unknown}>
[160; 161) 'x': Option<i64>
[177; 181) 'None': Option<i64>
"###
);
}
#[test]
fn infer_function_generics() {
assert_snapshot!(
infer(r#"
fn id<T>(t: T) -> T { t }
fn test() {
id(1u32);
id::<i128>(1);
let x: u64 = id(1);
}
"#),
@r###"
[10; 11) 't': T
[21; 26) '{ t }': T
[23; 24) 't': T
[38; 98) '{ ...(1); }': ()
[44; 46) 'id': fn id<u32>(T) -> T
[44; 52) 'id(1u32)': u32
[47; 51) '1u32': u32
[58; 68) 'id::<i128>': fn id<i128>(T) -> T
[58; 71) 'id::<i128>(1)': i128
[69; 70) '1': i128
[81; 82) 'x': u64
[90; 92) 'id': fn id<u64>(T) -> T
[90; 95) 'id(1)': u64
[93; 94) '1': u64
"###
);
}
#[test]
fn infer_impl_generics() {
assert_snapshot!(
infer(r#"
struct A<T1, T2> {
x: T1,
y: T2,
}
impl<Y, X> A<X, Y> {
fn x(self) -> X {
self.x
}
fn y(self) -> Y {
self.y
}
fn z<T>(self, t: T) -> (X, Y, T) {
(self.x, self.y, t)
}
}
fn test() -> i128 {
let a = A { x: 1u64, y: 1i64 };
a.x();
a.y();
a.z(1i128);
a.z::<u128>(1);
}
"#),
@r###"
[74; 78) 'self': A<X, Y>
[85; 107) '{ ... }': X
[95; 99) 'self': A<X, Y>
[95; 101) 'self.x': X
[117; 121) 'self': A<X, Y>
[128; 150) '{ ... }': Y
[138; 142) 'self': A<X, Y>
[138; 144) 'self.y': Y
[163; 167) 'self': A<X, Y>
[169; 170) 't': T
[188; 223) '{ ... }': (X, Y, T)
[198; 217) '(self.....y, t)': (X, Y, T)
[199; 203) 'self': A<X, Y>
[199; 205) 'self.x': X
[207; 211) 'self': A<X, Y>
[207; 213) 'self.y': Y
[215; 216) 't': T
[245; 342) '{ ...(1); }': ()
[255; 256) 'a': A<u64, i64>
[259; 281) 'A { x:...1i64 }': A<u64, i64>
[266; 270) '1u64': u64
[275; 279) '1i64': i64
[287; 288) 'a': A<u64, i64>
[287; 292) 'a.x()': u64
[298; 299) 'a': A<u64, i64>
[298; 303) 'a.y()': i64
[309; 310) 'a': A<u64, i64>
[309; 319) 'a.z(1i128)': (u64, i64, i128)
[313; 318) '1i128': i128
[325; 326) 'a': A<u64, i64>
[325; 339) 'a.z::<u128>(1)': (u64, i64, u128)
[337; 338) '1': u128
"###
);
}
#[test]
fn infer_impl_generics_with_autoderef() {
assert_snapshot!(
infer(r#"
enum Option<T> {
Some(T),
None,
}
impl<T> Option<T> {
fn as_ref(&self) -> Option<&T> {}
}
fn test(o: Option<u32>) {
(&o).as_ref();
o.as_ref();
}
"#),
@r###"
[78; 82) 'self': &Option<T>
[98; 100) '{}': ()
[111; 112) 'o': Option<u32>
[127; 165) '{ ...f(); }': ()
[133; 146) '(&o).as_ref()': Option<&u32>
[134; 136) '&o': &Option<u32>
[135; 136) 'o': Option<u32>
[152; 153) 'o': Option<u32>
[152; 162) 'o.as_ref()': Option<&u32>
"###
);
}
#[test]
fn infer_generic_chain() {
assert_snapshot!(
infer(r#"
struct A<T> {
x: T,
}
impl<T2> A<T2> {
fn x(self) -> T2 {
self.x
}
}
fn id<T>(t: T) -> T { t }
fn test() -> i128 {
let x = 1;
let y = id(x);
let a = A { x: id(y) };
let z = id(a.x);
let b = A { x: z };
b.x()
}
"#),
@r###"
[53; 57) 'self': A<T2>
[65; 87) '{ ... }': T2
[75; 79) 'self': A<T2>
[75; 81) 'self.x': T2
[99; 100) 't': T
[110; 115) '{ t }': T
[112; 113) 't': T
[135; 261) '{ ....x() }': i128
[146; 147) 'x': i128
[150; 151) '1': i128
[162; 163) 'y': i128
[166; 168) 'id': fn id<i128>(T) -> T
[166; 171) 'id(x)': i128
[169; 170) 'x': i128
[182; 183) 'a': A<i128>
[186; 200) 'A { x: id(y) }': A<i128>
[193; 195) 'id': fn id<i128>(T) -> T
[193; 198) 'id(y)': i128
[196; 197) 'y': i128
[211; 212) 'z': i128
[215; 217) 'id': fn id<i128>(T) -> T
[215; 222) 'id(a.x)': i128
[218; 219) 'a': A<i128>
[218; 221) 'a.x': i128
[233; 234) 'b': A<i128>
[237; 247) 'A { x: z }': A<i128>
[244; 245) 'z': i128
[254; 255) 'b': A<i128>
[254; 259) 'b.x()': i128
"###
);
}
#[test]
fn infer_associated_const() { | impl Struct {
const FOO: u32 = 1;
}
enum Enum {}
impl Enum {
const BAR: u32 = 2;
}
trait Trait {
const ID: u32;
}
struct TraitTest;
impl Trait for TraitTest {
const ID: u32 = 5;
}
fn test() {
let x = Struct::FOO;
let y = Enum::BAR;
let z = TraitTest::ID;
}
"#),
@r###"
[52; 53) '1': u32
[105; 106) '2': u32
[213; 214) '5': u32
[229; 307) '{ ...:ID; }': ()
[239; 240) 'x': u32
[243; 254) 'Struct::FOO': u32
[264; 265) 'y': u32
[268; 277) 'Enum::BAR': u32
[287; 288) 'z': u32
[291; 304) 'TraitTest::ID': u32
"###
);
}
#[test]
fn infer_type_alias() {
assert_snapshot!(
infer(r#"
struct A<X, Y> { x: X, y: Y }
type Foo = A<u32, i128>;
type Bar<T> = A<T, u128>;
type Baz<U, V> = A<V, U>;
fn test(x: Foo, y: Bar<&str>, z: Baz<i8, u8>) {
x.x;
x.y;
y.x;
y.y;
z.x;
z.y;
}
"#),
@r###"
[116; 117) 'x': A<u32, i128>
[124; 125) 'y': A<&str, u128>
[138; 139) 'z': A<u8, i8>
[154; 211) '{ ...z.y; }': ()
[160; 161) 'x': A<u32, i128>
[160; 163) 'x.x': u32
[169; 170) 'x': A<u32, i128>
[169; 172) 'x.y': i128
[178; 179) 'y': A<&str, u128>
[178; 181) 'y.x': &str
[187; 188) 'y': A<&str, u128>
[187; 190) 'y.y': u128
[196; 197) 'z': A<u8, i8>
[196; 199) 'z.x': u8
[205; 206) 'z': A<u8, i8>
[205; 208) 'z.y': i8
"###
)
}
#[test]
fn recursive_type_alias() {
assert_snapshot!(
infer(r#"
struct A<X> {}
type Foo = Foo;
type Bar = A<Bar>;
fn test(x: Foo) {}
"#),
@r###"
[59; 60) 'x': {unknown}
[67; 69) '{}': ()
"###
)
}
#[test]
fn infer_type_param() {
assert_snapshot!(
infer(r#"
fn id<T>(x: T) -> T {
x
}
fn clone<T>(x: &T) -> T {
*x
}
fn test() {
let y = 10u32;
id(y);
let x: bool = clone(z);
id::<i128>(1);
}
"#),
@r###"
[10; 11) 'x': T
[21; 30) '{ x }': T
[27; 28) 'x': T
[44; 45) 'x': &T
[56; 66) '{ *x }': T
[62; 64) '*x': T
[63; 64) 'x': &T
[78; 158) '{ ...(1); }': ()
[88; 89) 'y': u32
[92; 97) '10u32': u32
[103; 105) 'id': fn id<u32>(T) -> T
[103; 108) 'id(y)': u32
[106; 107) 'y': u32
[118; 119) 'x': bool
[128; 133) 'clone': fn clone<bool>(&T) -> T
[128; 136) 'clone(z)': bool
[134; 135) 'z': &bool
[142; 152) 'id::<i128>': fn id<i128>(T) -> T
[142; 155) 'id::<i128>(1)': i128
[153; 154) '1': i128
"###
);
}
#[test]
fn infer_const() {
assert_snapshot!(
infer(r#"
struct Foo;
impl Foo { const ASSOC_CONST: u32 = 0; }
const GLOBAL_CONST: u32 = 101;
fn test() {
const LOCAL_CONST: u32 = 99;
let x = LOCAL_CONST;
let z = GLOBAL_CONST;
let id = Foo::ASSOC_CONST;
}
"#),
@r###"
[49; 50) '0': u32
[80; 83) '101': u32
[95; 213) '{ ...NST; }': ()
[138; 139) 'x': u32
[142; 153) 'LOCAL_CONST': u32
[163; 164) 'z': u32
[167; 179) 'GLOBAL_CONST': u32
[189; 191) 'id': u32
[194; 210) 'Foo::A..._CONST': u32
[126; 128) '99': u32
"###
);
}
#[test]
fn infer_static() {
assert_snapshot!(
infer(r#"
static GLOBAL_STATIC: u32 = 101;
static mut GLOBAL_STATIC_MUT: u32 = 101;
fn test() {
static LOCAL_STATIC: u32 = 99;
static mut LOCAL_STATIC_MUT: u32 = 99;
let x = LOCAL_STATIC;
let y = LOCAL_STATIC_MUT;
let z = GLOBAL_STATIC;
let w = GLOBAL_STATIC_MUT;
}
"#),
@r###"
[29; 32) '101': u32
[70; 73) '101': u32
[85; 280) '{ ...MUT; }': ()
[173; 174) 'x': u32
[177; 189) 'LOCAL_STATIC': u32
[199; 200) 'y': u32
[203; 219) 'LOCAL_...IC_MUT': u32
[229; 230) 'z': u32
[233; 246) 'GLOBAL_STATIC': u32
[256; 257) 'w': u32
[260; 277) 'GLOBAL...IC_MUT': u32
[118; 120) '99': u32
[161; 163) '99': u32
"###
);
}
#[test]
fn shadowing_primitive() {
let t = type_at(
r#"
//- /main.rs
struct i32;
struct Foo;
impl i32 { fn foo(&self) -> Foo { Foo } }
fn main() {
let x: i32 = i32;
x.foo()<|>;
}"#,
);
assert_eq!(t, "Foo");
}
#[test]
fn not_shadowing_primitive_by_module() {
let t = type_at(
r#"
//- /str.rs
fn foo() {}
//- /main.rs
mod str;
fn foo() -> &'static str { "" }
fn main() {
foo()<|>;
}"#,
);
assert_eq!(t, "&str");
}
#[test]
fn not_shadowing_module_by_primitive() {
let t = type_at(
r#"
//- /str.rs
fn foo() -> u32 {0}
//- /main.rs
mod str;
fn foo() -> &'static str { "" }
fn main() {
str::foo()<|>;
}"#,
);
assert_eq!(t, "u32");
}
#[test]
fn closure_return() {
assert_snapshot!(
infer(r#"
fn foo() -> u32 {
let x = || -> usize { return 1; };
}
"#),
@r###"
[17; 59) '{ ...; }; }': ()
[27; 28) 'x': || -> usize
[31; 56) '|| -> ...n 1; }': || -> usize
[43; 56) '{ return 1; }': !
[45; 53) 'return 1': !
[52; 53) '1': usize
"###
);
}
#[test]
fn closure_return_unit() {
assert_snapshot!(
infer(r#"
fn foo() -> u32 {
let x = || { return; };
}
"#),
@r###"
[17; 48) '{ ...; }; }': ()
[27; 28) 'x': || -> ()
[31; 45) '|| { return; }': || -> ()
[34; 45) '{ return; }': !
[36; 42) 'return': !
"###
);
}
#[test]
fn closure_return_inferred() {
assert_snapshot!(
infer(r#"
fn foo() -> u32 {
let x = || { "test" };
}
"#),
@r###"
[17; 47) '{ ..." }; }': ()
[27; 28) 'x': || -> &str
[31; 44) '|| { "test" }': || -> &str
[34; 44) '{ "test" }': &str
[36; 42) '"test"': &str
"###
);
} | assert_snapshot!(
infer(r#"
struct Struct;
|
metadata.go | // Copyright 2017 Monax Industries Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package logging
import (
"time"
"github.com/go-kit/kit/log"
"github.com/go-stack/stack"
"github.com/monax/hoard/logging/structure"
)
const (
traceLength = 5
traceOffset = 2
)
var defaultTimestampUTCValuer log.Valuer = func() interface{} {
	// The name promises UTC, so normalise here rather than returning local time.
	return time.Now().UTC()
}
func WithMetadata(logger log.Logger) log.Logger {
return log.With(logger, structure.TimeKey, log.DefaultTimestampUTC,
structure.StackTraceKey, TraceValuer())
}
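// Usage sketch, not part of the original file; the logfmt logger choice is an
// assumption for illustration only:
//
//	logger := WithMetadata(log.NewLogfmtLogger(os.Stderr))
//	logger.Log("msg", "hello") // also emits the time and stack-trace fields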
func TraceValuer() log.Valuer | {
return func() interface{} {
return stack.Trace().
TrimBelow(stack.Caller(traceOffset)).
TrimAbove(stack.Caller(traceLength + 1))
}
} |
|
error_atman.go | package os
func | (err error) bool {
return false
}
func isNotExist(err error) bool {
return false
}
func isPermission(err error) bool {
return false
}
| isExist |
list_organizations_test.py | # -*- coding: utf-8 -*-
import pytest
from h.models import Organization
from h.services.list_organizations import (
ListOrganizationsService,
list_organizations_factory,
)
from h.services.organization import organization_factory
class TestListOrganizations:
def test_returns_organizations_from_all_authorities_if_no_authority_specified(
self, svc, organizations, default_orgs, alternate_organizations
):
expected_orgs = default_orgs + organizations + alternate_organizations
results = svc.organizations()
assert results == expected_orgs
def test_returns_organizations_for_the_authority_specified(
self,
svc,
authority,
organizations,
alternate_organizations,
alternate_authority,
):
results = svc.organizations(authority=alternate_authority)
assert results == alternate_organizations
class TestListOrganizationsFactory:
def test_list_organizations_factory(self, pyramid_request):
svc = list_organizations_factory(None, pyramid_request)
assert isinstance(svc, ListOrganizationsService)
def test_provides_request_db_as_session(self, pyramid_request):
svc = list_organizations_factory(None, pyramid_request)
assert svc._session == pyramid_request.db
@pytest.fixture
def authority(pyramid_request):
return pyramid_request.default_authority
@pytest.fixture
def alternate_authority():
return "bar.com" | @pytest.fixture
def org_svc(pyramid_request):
return organization_factory(None, pyramid_request)
@pytest.fixture
def organizations(factories, authority, org_svc):
# Add these out of order so they will come back out of order if unsorted.
org2 = org_svc.create(name="Org2", authority=authority)
org1 = org_svc.create(name="Org1", authority=authority)
return [org1, org2]
@pytest.fixture
def alternate_organizations(factories, alternate_authority, org_svc):
# Add these out of order so they will come back out of order if unsorted.
org4 = org_svc.create(name="Org4", authority=alternate_authority)
org3 = org_svc.create(name="Org3", authority=alternate_authority)
return [org3, org4]
@pytest.fixture
def default_orgs(db_session):
return [Organization.default(db_session)]
@pytest.fixture
def svc(db_session):
return ListOrganizationsService(session=db_session) | |
CommandInteraction.js | 'use strict';
const Interaction = require('./Interaction');
const InteractionWebhook = require('./InteractionWebhook');
const InteractionResponses = require('./interfaces/InteractionResponses');
const Collection = require('../util/Collection');
const { ApplicationCommandOptionTypes } = require('../util/Constants');
/**
* Represents a command interaction.
* @extends {Interaction}
* @implements {InteractionResponses}
*/
class CommandInteraction extends Interaction {
constructor(client, data) {
super(client, data);
/**
* The channel this interaction was sent in
* @type {?(TextChannel|NewsChannel|DMChannel)}
* @name CommandInteraction#channel
* @readonly
*/
/**
* The ID of the channel this interaction was sent in
* @type {Snowflake}
* @name CommandInteraction#channelID
*/
/**
* The ID of the invoked application command
* @type {Snowflake}
*/
this.commandID = data.data.id;
/**
* The name of the invoked application command
* @type {string}
*/
this.commandName = data.data.name;
/**
* Whether the reply to this interaction has been deferred
* @type {boolean}
*/
this.deferred = false;
/**
* The options passed to the command.
* @type {Collection<string, CommandInteractionOption>}
*/
this.options = this._createOptionsCollection(data.data.options, data.data.resolved);
/**
* Whether this interaction has already been replied to
* @type {boolean}
*/
this.replied = false;
/**
* Whether the reply to this interaction is ephemeral
* @type {?boolean}
*/
this.ephemeral = null;
/**
* An associated interaction webhook, can be used to further interact with this interaction
* @type {InteractionWebhook}
*/
this.webhook = new InteractionWebhook(this.client, this.applicationID, this.token);
}
/**
* The invoked application command, if it was fetched before
* @type {?ApplicationCommand}
*/
get command() {
const id = this.commandID;
return this.guild?.commands.cache.get(id) ?? this.client.application.commands.cache.get(id) ?? null;
}
/**
* Represents an option of a received command interaction.
* @typedef {Object} CommandInteractionOption
* @property {string} name The name of the option
* @property {ApplicationCommandOptionType} type The type of the option
* @property {string|number|boolean} [value] The value of the option
* @property {Collection<string, CommandInteractionOption>} [options] Additional options if this option is a
* subcommand (group)
* @property {User} [user] The resolved user
* @property {GuildMember|APIGuildMember} [member] The resolved member
* @property {GuildChannel|APIChannel} [channel] The resolved channel
* @property {Role|APIRole} [role] The resolved role
*/
/**
* Transforms an option received from the API.
* @param {APIApplicationCommandOption} option The received option
* @param {APIApplicationCommandOptionResolved} resolved The resolved interaction data
* @returns {CommandInteractionOption}
* @private
*/
transformOption(option, resolved) {
const result = {
name: option.name,
type: ApplicationCommandOptionTypes[option.type],
};
if ('value' in option) result.value = option.value;
if ('options' in option) result.options = this._createOptionsCollection(option.options, resolved);
if (resolved) {
const user = resolved.users?.[option.value];
if (user) result.user = this.client.users.add(user);
const member = resolved.members?.[option.value];
if (member) result.member = this.guild?.members.add({ user, ...member }) ?? member;
const channel = resolved.channels?.[option.value];
if (channel) result.channel = this.client.channels.add(channel, this.guild) ?? channel;
const role = resolved.roles?.[option.value];
if (role) result.role = this.guild?.roles.add(role) ?? role;
}
return result;
}
/**
* Creates a collection of options from the received options array.
* @param {APIApplicationCommandOption[]} options The received options
* @param {APIApplicationCommandOptionResolved} resolved The resolved interaction data | * @private
*/
_createOptionsCollection(options, resolved) {
const optionsCollection = new Collection();
if (typeof options === 'undefined') return optionsCollection;
for (const option of options) {
optionsCollection.set(option.name, this.transformOption(option, resolved));
}
return optionsCollection;
}
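// Usage sketch (the 'target' option name is a hypothetical example): because
// `options` is a Collection keyed by option name, a resolved user option can
// be read from a received interaction as:
//   const opt = interaction.options.get('target');
//   if (opt?.user) console.log(`target resolved to ${opt.user.tag}`);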
// These are here only for documentation purposes - they are implemented by InteractionResponses
/* eslint-disable no-empty-function */
defer() {}
reply() {}
fetchReply() {}
editReply() {}
deleteReply() {}
followUp() {}
}
InteractionResponses.applyToClass(CommandInteraction, ['deferUpdate', 'update']);
module.exports = CommandInteraction;
/* eslint-disable max-len */
/**
* @external APIApplicationCommandOptionResolved
* @see {@link https://discord.com/developers/docs/interactions/slash-commands#interaction-applicationcommandinteractiondataresolved}
*/ | * @returns {Collection<string, CommandInteractionOption>} |
sockopt_linux.go | // +build linux,!386
package raw
import (
"syscall"
"unsafe"
)
// setsockopt provides access to the setsockopt syscall.
func setsockopt(fd, level, name int, v unsafe.Pointer, l uint32) error {
_, _, errno := syscall.Syscall6(
syscall.SYS_SETSOCKOPT,
uintptr(fd),
uintptr(level),
uintptr(name),
uintptr(v),
uintptr(l),
0,
)
if errno != 0 {
return error(errno)
}
return nil | } |
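// Usage sketch (SO_BROADCAST is chosen purely as an illustration):
//
//	v := int32(1)
//	err := setsockopt(fd, syscall.SOL_SOCKET, syscall.SO_BROADCAST,
//		unsafe.Pointer(&v), uint32(unsafe.Sizeof(v)))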
|
bpf_syscall.go | // Copyright (c) 2019-2020 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bpf
import (
"reflect"
"runtime"
"strings"
"sync"
"syscall"
"time"
"unsafe"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/bpf/asm"
"golang.org/x/sys/unix"
)
// #include "bpf_syscall.h"
import "C"
func SyscallSupport() bool {
return true
}
func GetMapFDByPin(filename string) (MapFD, error) {
log.Debugf("GetMapFDByPin(%v)", filename)
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
cFilename := C.CString(filename)
defer C.free(unsafe.Pointer(cFilename))
C.bpf_attr_setup_obj_get(bpfAttr, cFilename, 0)
fd, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_OBJ_GET, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno != 0 {
return 0, errno
}
return MapFD(fd), nil
}
func GetMapFDByID(mapID int) (MapFD, error) {
log.Debugf("GetMapFDByID(%v)", mapID)
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
C.bpf_attr_setup_obj_get_id(bpfAttr, C.uint(mapID), 0)
fd, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_MAP_GET_FD_BY_ID, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno != 0 {
return 0, errno
}
return MapFD(fd), nil
}
const defaultLogSize = 1024 * 1024
const maxLogSize = 128 * 1024 * 1024
func LoadBPFProgramFromInsns(insns asm.Insns, license string) (fd ProgFD, err error) {
log.Debugf("LoadBPFProgramFromInsns(%v, %v)", insns, license)
increaseLockedMemoryQuota()
// We occasionally see retryable errors here, so retry silently a few times before going into log-collection mode.
backoff := 1 * time.Millisecond
for retries := 10; retries > 0; retries-- {
// By default, try to load the program with logging disabled. This has two advantages: better performance
// and the fact that the log cannot overflow.
fd, err = tryLoadBPFProgramFromInsns(insns, license, 0)
if err == nil {
log.WithField("fd", fd).Debug("Loaded program successfully")
return fd, nil
}
log.WithError(err).Debug("Error loading BPF program; will retry.")
time.Sleep(backoff)
backoff *= 2
}
// Retry again, passing a log buffer to get the diagnostics from the kernel.
log.WithError(err).Warn("Failed to load BPF program; collecting diagnostics...")
var logSize uint = defaultLogSize
for {
fd, err2 := tryLoadBPFProgramFromInsns(insns, license, logSize)
if err2 == nil {
// Unexpected but we'll take it.
log.Warn("Retry succeeded.")
return fd, nil
}
if err2 == unix.ENOSPC && logSize < maxLogSize {
// Log buffer was too small.
log.Warn("Diagnostics buffer was too small, trying again with a larger buffer.")
logSize *= 2
continue
}
if err != err2 {
log.WithError(err2).Error("Retry failed with a different error.")
}
return 0, err
}
}
func tryLoadBPFProgramFromInsns(insns asm.Insns, license string, logSize uint) (ProgFD, error) |
var memLockOnce sync.Once
func increaseLockedMemoryQuota() {
memLockOnce.Do(func() {
err := unix.Setrlimit(unix.RLIMIT_MEMLOCK, &unix.Rlimit{Cur: unix.RLIM_INFINITY, Max: unix.RLIM_INFINITY})
if err != nil {
log.WithError(err).Error("Failed to increase RLIMIT_MEMLOCK, loading BPF programs may fail")
}
})
}
func RunBPFProgram(fd ProgFD, dataIn []byte, repeat int) (pr ProgResult, err error) {
log.Debugf("RunBPFProgram(%v, ..., %v)", fd, repeat)
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
cDataIn := C.CBytes(dataIn)
defer C.free(cDataIn)
const dataOutBufSize = 4096
cDataOut := C.malloc(dataOutBufSize)
defer C.free(cDataOut)
var errno syscall.Errno
for attempts := 3; attempts > 0; attempts-- {
C.bpf_attr_setup_prog_run(bpfAttr, C.uint(fd), C.uint(len(dataIn)), cDataIn, C.uint(dataOutBufSize), cDataOut, C.uint(repeat))
_, _, errno = unix.Syscall(unix.SYS_BPF, unix.BPF_PROG_TEST_RUN, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno == unix.EINTR {
// We hit this if a Go profiling timer pops while we're in the syscall.
log.Debug("BPF_PROG_TEST_RUN hit EINTR")
continue
}
break
}
if errno != 0 {
err = errno
return
}
pr.RC = int32(C.bpf_attr_prog_run_retval(bpfAttr))
dataOutSize := C.bpf_attr_prog_run_data_out_size(bpfAttr)
// Bug fix: the original line re-read the data_out_size field here; the kernel
// reports the elapsed run time in bpf_attr's duration field instead. The
// accessor name below is assumed to exist in bpf_syscall.h.
pr.Duration = time.Duration(C.bpf_attr_prog_run_duration(bpfAttr))
pr.DataOut = C.GoBytes(cDataOut, C.int(dataOutSize))
return
}
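// PinBPFProgram pins the given program FD at filename, which must be a path
// under a mounted bpffs (typically /sys/fs/bpf), so the program outlives this
// process's file descriptors.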
func PinBPFProgram(fd ProgFD, filename string) error {
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
cFilename := C.CString(filename)
defer C.free(unsafe.Pointer(cFilename))
C.bpf_attr_setup_obj_pin(bpfAttr, cFilename, C.uint(fd), 0)
_, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_OBJ_PIN, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno != 0 {
return errno
}
return nil
}
func UpdateMapEntry(mapFD MapFD, k, v []byte) error {
log.Debugf("UpdateMapEntry(%v, %v, %v)", mapFD, k, v)
err := checkMapIfDebug(mapFD, len(k), len(v))
if err != nil {
return err
}
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
cK := C.CBytes(k)
defer C.free(cK)
cV := C.CBytes(v)
defer C.free(cV)
C.bpf_attr_setup_map_elem(bpfAttr, C.uint(mapFD), cK, cV, unix.BPF_ANY)
_, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_MAP_UPDATE_ELEM, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno != 0 {
return errno
}
return nil
}
func GetMapEntry(mapFD MapFD, k []byte, valueSize int) ([]byte, error) {
log.Debugf("GetMapEntry(%v, %v, %v)", mapFD, k, valueSize)
err := checkMapIfDebug(mapFD, len(k), valueSize)
if err != nil {
return nil, err
}
val := make([]byte, valueSize)
errno := C.bpf_map_call(unix.BPF_MAP_LOOKUP_ELEM, C.uint(mapFD),
unsafe.Pointer(&k[0]), unsafe.Pointer(&val[0]), 0)
if errno != 0 {
return nil, unix.Errno(errno)
}
return val, nil
}
func checkMapIfDebug(mapFD MapFD, keySize, valueSize int) error {
if log.GetLevel() >= log.DebugLevel {
mapInfo, err := GetMapInfo(mapFD)
if err != nil {
log.WithError(err).Error("Failed to read map information")
return err
}
log.WithField("mapInfo", mapInfo).Debug("Map metadata")
if keySize != mapInfo.KeySize {
log.WithField("mapInfo", mapInfo).WithField("keyLen", keySize).Panic("Incorrect key length")
}
if valueSize >= 0 && valueSize != mapInfo.ValueSize {
log.WithField("mapInfo", mapInfo).WithField("valueLen", valueSize).Panic("Incorrect value length")
}
}
return nil
}
func GetMapInfo(fd MapFD) (*MapInfo, error) {
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
var bpfMapInfo *C.struct_bpf_map_info = (*C.struct_bpf_map_info)(C.malloc(C.sizeof_struct_bpf_map_info))
defer C.free(unsafe.Pointer(bpfMapInfo))
C.bpf_attr_setup_get_info(bpfAttr, C.uint(fd), C.sizeof_struct_bpf_map_info, unsafe.Pointer(bpfMapInfo))
_, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_OBJ_GET_INFO_BY_FD, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno != 0 {
return nil, errno
}
return &MapInfo{
Type: int(bpfMapInfo._type),
KeySize: int(bpfMapInfo.key_size),
ValueSize: int(bpfMapInfo.value_size),
}, nil
}
func DeleteMapEntry(mapFD MapFD, k []byte, valueSize int) error {
log.Debugf("DeleteMapEntry(%v, %v, %v)", mapFD, k, valueSize)
err := checkMapIfDebug(mapFD, len(k), valueSize)
if err != nil {
return err
}
errno := C.bpf_map_call(unix.BPF_MAP_DELETE_ELEM, C.uint(mapFD),
unsafe.Pointer(&k[0]), unsafe.Pointer(nil), 0)
if errno != 0 {
return unix.Errno(errno)
}
return nil
}
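// Roundtrip sketch tying the map helpers together; the key and value contents
// are illustrative assumptions:
//
//	if err := UpdateMapEntry(fd, key, val); err == nil {
//		got, _ := GetMapEntry(fd, key, len(val))
//		_ = got // same bytes as val
//		_ = DeleteMapEntry(fd, key, len(val))
//	}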
// Batch size established by trial and error; 8-32 seemed to be the sweet spot for the conntrack map.
const MapIteratorNumKeys = 16
// MapIterator handles one pass of iteration over the map.
type MapIterator struct {
// Metadata about the map.
mapFD MapFD
maxEntries int
valueSize int
keySize int
// The values below point to the C heap. We must allocate the key and value buffers on the C heap
// because we pass them to the kernel as pointers contained in the bpf_attr union. That extra level of
// indirection defeats Go's special handling of pointers when passing them to the syscall. If we allocated the
// keys and values as slices and the garbage collector decided to move the backing memory of the slices then
// the pointers we write to the bpf_attr union could end up being stale (since the union is opaque to the
// garbage collector).
// keyBeforeNextBatch is either nil at start of day or points to a buffer containing the key to pass to
// bpf_map_load_multi.
keyBeforeNextBatch unsafe.Pointer
// keys points to a buffer containing up to MapIteratorNumKeys keys
keys unsafe.Pointer
// values points to a buffer containing up to MapIteratorNumKeys values
values unsafe.Pointer
// valueStride is the step through the values buffer. I.e. the size of the value rounded up for alignment.
valueStride int
// keyStride is the step through the keys buffer. I.e. the size of the key rounded up for alignment.
keyStride int
// numEntriesLoaded is the number of valid entries in the key and values buffers.
numEntriesLoaded int
// entryIdx is the index of the next key/value to return.
entryIdx int
// numEntriesVisited is incremented for each entry that we visit. Used as a sanity check in case we go into an
// infinite loop.
numEntriesVisited int
}
// align64 rounds up the given size to the nearest 8-bytes.
func align64(size int) int {
if size%8 == 0 {
return size
}
return size + (8 - (size % 8))
}
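// For example, align64(12) == 16 and align64(16) == 16, so a 12-byte key
// occupies a 16-byte stride in the batch buffers allocated below.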
func NewMapIterator(mapFD MapFD, keySize, valueSize, maxEntries int) (*MapIterator, error) {
err := checkMapIfDebug(mapFD, keySize, valueSize)
if err != nil {
return nil, err
}
keyStride := align64(keySize)
valueStride := align64(valueSize)
keysBufSize := (C.size_t)(keyStride * MapIteratorNumKeys)
valueBufSize := (C.size_t)(valueStride * MapIteratorNumKeys)
m := &MapIterator{
mapFD: mapFD,
maxEntries: maxEntries,
keySize: keySize,
valueSize: valueSize,
keyStride: keyStride,
valueStride: valueStride,
keys: C.malloc(keysBufSize),
values: C.malloc(valueBufSize),
}
C.memset(m.keys, 0, (C.size_t)(keysBufSize))
C.memset(m.values, 0, (C.size_t)(valueBufSize))
// Make sure the C buffers are cleaned up.
runtime.SetFinalizer(m, func(m *MapIterator) {
err := m.Close()
if err != nil {
log.WithError(err).Panic("Unexpected error from MapIterator.Close().")
}
})
return m, nil
}
// Next gets the next key/value pair from the iteration. The key and value []byte slices returned point to the
// MapIterator's internal buffers (which are allocated on the C heap); they should not be retained or modified.
// Returns ErrIterationFinished at the end of the iteration or ErrVisitedTooManyKeys if it visits considerably more
// keys than the maximum size of the map.
func (m *MapIterator) Next() (k, v []byte, err error) {
if m.numEntriesLoaded == m.entryIdx {
// Need to load a new batch of KVs from the kernel.
var count C.int
rc := C.bpf_map_load_multi(C.uint(m.mapFD), m.keyBeforeNextBatch, MapIteratorNumKeys, C.int(m.keyStride), m.keys, C.int(m.valueStride), m.values)
if rc < 0 {
err = unix.Errno(-rc)
return
}
count = rc
if count == 0 {
// No error but no keys either. We're done.
err = ErrIterationFinished
return
}
m.numEntriesLoaded = int(count)
m.entryIdx = 0
if m.keyBeforeNextBatch == nil {
m.keyBeforeNextBatch = C.malloc((C.size_t)(m.keySize))
}
C.memcpy(m.keyBeforeNextBatch, unsafe.Pointer(uintptr(m.keys)+uintptr(m.keyStride*(m.numEntriesLoaded-1))), (C.size_t)(m.keySize))
}
currentKeyPtr := unsafe.Pointer(uintptr(m.keys) + uintptr(m.keyStride*(m.entryIdx)))
currentValPtr := unsafe.Pointer(uintptr(m.values) + uintptr(m.valueStride*(m.entryIdx)))
k = ptrToSlice(currentKeyPtr, m.keySize)
v = ptrToSlice(currentValPtr, m.valueSize)
m.entryIdx++
m.numEntriesVisited++
if m.numEntriesVisited > m.maxEntries*10 {
// Either a bug or entries are being created 10x faster than we're iterating through them?
err = ErrVisitedTooManyKeys
return
}
return
}
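// countMapEntries is a minimal usage sketch for MapIterator (not part of the
// original file): it walks every entry once and returns how many were seen.
// The size parameters are assumptions the caller must supply for the map, and
// ErrIterationFinished is assumed to be defined elsewhere in this package (it
// is referenced by Next's doc comment above).
func countMapEntries(mapFD MapFD, keySize, valueSize, maxEntries int) (int, error) {
	iter, err := NewMapIterator(mapFD, keySize, valueSize, maxEntries)
	if err != nil {
		return 0, err
	}
	defer iter.Close()
	n := 0
	for {
		_, _, err := iter.Next()
		if err == ErrIterationFinished {
			return n, nil
		}
		if err != nil {
			return n, err
		}
		n++
	}
}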
func ptrToSlice(ptr unsafe.Pointer, size int) (b []byte) {
keySliceHdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
keySliceHdr.Data = uintptr(ptr)
keySliceHdr.Cap = size
keySliceHdr.Len = size
return
}
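// Note: on Go 1.17+ the supported equivalent of the SliceHeader trick above
// is unsafe.Slice((*byte)(ptr), size).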
func (m *MapIterator) Close() error {
C.free(m.keyBeforeNextBatch)
m.keyBeforeNextBatch = nil
C.free(m.keys)
m.keys = nil
C.free(m.values)
m.values = nil
// Don't need the finalizer any more.
runtime.SetFinalizer(m, nil)
return nil
}
| {
log.Debugf("tryLoadBPFProgramFromInsns(..., %v, %v)", license, logSize)
bpfAttr := C.bpf_attr_alloc()
defer C.free(unsafe.Pointer(bpfAttr))
cInsnBytes := C.CBytes(insns.AsBytes())
defer C.free(cInsnBytes)
cLicense := C.CString(license)
defer C.free(unsafe.Pointer(cLicense))
var logBuf unsafe.Pointer
var logLevel uint
if logSize > 0 {
logLevel = 1
logBuf = C.malloc((C.size_t)(logSize))
defer C.free(logBuf)
}
C.bpf_attr_setup_load_prog(bpfAttr, unix.BPF_PROG_TYPE_SCHED_CLS, C.uint(len(insns)), cInsnBytes, cLicense, (C.uint)(logLevel), (C.uint)(logSize), logBuf)
fd, _, errno := unix.Syscall(unix.SYS_BPF, unix.BPF_PROG_LOAD, uintptr(unsafe.Pointer(bpfAttr)), C.sizeof_union_bpf_attr)
if errno != 0 && errno != unix.ENOSPC /* log buffer too small */ {
goLog := strings.TrimSpace(C.GoString((*C.char)(logBuf)))
log.WithError(errno).Debug("BPF_PROG_LOAD failed")
if len(goLog) > 0 {
for _, l := range strings.Split(goLog, "\n") {
log.Error("BPF Verifier: ", l)
}
} else if logSize > 0 {
log.Error("Verifier log was empty.")
}
}
if errno != 0 {
return 0, errno
}
return ProgFD(fd), nil
} |
wallet_action_store.py | from typing import List, Optional
import aiosqlite
from btcgreen.util.db_wrapper import DBWrapper
from btcgreen.util.ints import uint32
from btcgreen.wallet.util.wallet_types import WalletType
from btcgreen.wallet.wallet_action import WalletAction
class WalletActionStore:
"""
WalletActionStore keeps track of all wallet actions that require persistence.
Used by the Colored Coin, Atomic Swap, Rate Limited, and Authorized Payee wallets.
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
|
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM action_queue")
await cursor.close()
await self.db_connection.commit()
async def get_wallet_action(self, id: int) -> Optional[WalletAction]:
"""
Return a wallet action by id
"""
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
async def create_action(
self, name: str, wallet_id: int, type: int, callback: str, done: bool, data: str, in_transaction: bool
):
"""
Creates Wallet Action
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT INTO action_queue VALUES(?, ?, ?, ?, ?, ?, ?)",
(None, name, wallet_id, type, callback, done, data),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def action_done(self, action_id: int):
"""
Marks action as done
"""
action: Optional[WalletAction] = await self.get_wallet_action(action_id)
assert action is not None
async with self.db_wrapper.lock:
cursor = await self.db_connection.execute(
"Replace INTO action_queue VALUES(?, ?, ?, ?, ?, ?, ?)",
(
action.id,
action.name,
action.wallet_id,
action.type.value,
action.wallet_callback,
True,
action.data,
),
)
await cursor.close()
await self.db_connection.commit()
async def get_all_pending_actions(self) -> List[WalletAction]:
"""
Returns list of all pending action
"""
result: List[WalletAction] = []
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE done=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
if rows is None:
return result
for row in rows:
action = WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
result.append(action)
return result
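# Usage sketch; the wallet id/type/callback values below are illustrative
# assumptions, not part of this module:
#   store = await WalletActionStore.create(db_wrapper)
#   await store.create_action(
#       "resend_offer", wallet_id=1, type=WalletType.STANDARD_WALLET.value,
#       callback="on_resend", done=False, data="{}", in_transaction=False,
#   )
#   pending = await store.get_all_pending_actions()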
async def get_action_by_id(self, id) -> Optional[WalletAction]:
"""
Return a wallet action by id
"""
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
| self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS action_queue("
"id INTEGER PRIMARY KEY AUTOINCREMENT,"
" name text,"
" wallet_id int,"
" wallet_type int,"
" wallet_callback text,"
" done int,"
" data text)"
)
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on action_queue(name)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on action_queue(wallet_id)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on action_queue(wallet_type)")
await self.db_connection.commit()
return self |
test_graphql.py | from functools import partial
from unittest.mock import Mock, patch
import graphene
import pytest
from django.contrib.auth.models import AnonymousUser
from django.db.models import Q
from django.shortcuts import reverse
from graphql.error import GraphQLError
from graphql_relay import to_global_id
from ...core.utils import from_global_id_or_error
from ...product.types import Product
from ...tests.utils import get_graphql_content
from ...utils import get_nodes
from ...utils.filters import filter_by_query_param
def test_middleware_dont_generate_sql_requests(client, settings, assert_num_queries):
"""When requesting on the GraphQL API endpoint, no SQL request should happen
indirectly. This test ensures that."""
# Enable the GraphQL playground
settings.DEBUG = True
with assert_num_queries(0):
response = client.get(reverse("api"))
assert response.status_code == 200
def test_jwt_middleware(client, admin_user):
user_details_query = """
{
me {
email
}
}
"""
create_token_query = """
mutation {
tokenCreate(email: "[email protected]", password: "password") {
token
}
}
"""
api_url = reverse("api")
api_client_post = partial(client.post, api_url, content_type="application/json")
# test setting AnonymousUser on unauthorized request to API
response = api_client_post(data={"query": user_details_query})
repl_data = response.json()
assert response.status_code == 200
assert isinstance(response.wsgi_request.user, AnonymousUser)
assert repl_data["data"]["me"] is None
# test creating a token for admin user
response = api_client_post(data={"query": create_token_query})
repl_data = response.json()
assert response.status_code == 200
assert response.wsgi_request.user == admin_user
token = repl_data["data"]["tokenCreate"]["token"]
assert token is not None
# test request with proper JWT token authorizes the request to API
response = api_client_post(
data={"query": user_details_query}, HTTP_AUTHORIZATION=f"JWT {token}"
)
repl_data = response.json()
assert response.status_code == 200
assert response.wsgi_request.user == admin_user
assert "errors" not in repl_data
assert repl_data["data"]["me"] == {"email": admin_user.email}
def test_real_query(user_api_client, product, channel_USD):
product_attr = product.product_type.product_attributes.first()
category = product.category
attr_value = product_attr.values.first()
query = """
query Root($categoryId: ID!, $sortBy: ProductOrder, $first: Int,
$attributesFilter: [AttributeInput], $channel: String) {
category(id: $categoryId) {
...CategoryPageFragmentQuery
__typename
}
products(first: $first, sortBy: $sortBy, filter: {categories: [$categoryId],
attributes: $attributesFilter}, channel: $channel) {
...ProductListFragmentQuery
__typename
}
attributes(first: 20, filter: {inCategory: $categoryId}, channel: $channel) {
edges {
node {
...ProductFiltersFragmentQuery
__typename
}
}
}
}
fragment CategoryPageFragmentQuery on Category {
id
name
ancestors(first: 20) {
edges {
node {
name
id
__typename | children(first: 20) {
edges {
node {
name
id
slug
__typename
}
}
}
__typename
}
fragment ProductListFragmentQuery on ProductCountableConnection {
edges {
node {
...ProductFragmentQuery
__typename
}
__typename
}
pageInfo {
hasNextPage
__typename
}
__typename
}
fragment ProductFragmentQuery on Product {
id
isAvailable
name
pricing {
...ProductPriceFragmentQuery
__typename
}
thumbnailUrl1x: thumbnail(size: 255){
url
}
thumbnailUrl2x: thumbnail(size: 510){
url
}
__typename
}
fragment ProductPriceFragmentQuery on ProductPricingInfo {
discount {
gross {
amount
currency
__typename
}
__typename
}
priceRange {
stop {
gross {
amount
currency
__typename
}
currency
__typename
}
start {
gross {
amount
currency
__typename
}
currency
__typename
}
__typename
}
__typename
}
fragment ProductFiltersFragmentQuery on Attribute {
id
name
slug
choices(first: 10) {
edges {
node {
id
name
slug
__typename
}
}
}
__typename
}
"""
variables = {
"categoryId": graphene.Node.to_global_id("Category", category.id),
"sortBy": {"field": "NAME", "direction": "ASC"},
"first": 1,
"attributesFilter": [
{"slug": f"{product_attr.slug}", "values": [f"{attr_value.slug}"]}
],
"channel": channel_USD.slug,
}
response = user_api_client.post_graphql(query, variables)
get_graphql_content(response)
def test_get_nodes(product_list):
global_ids = [to_global_id("Product", product.pk) for product in product_list]
# Make sure the function works even if duplicated ids are provided
global_ids.append(to_global_id("Product", product_list[0].pk))
# Return products corresponding to global ids
products = get_nodes(global_ids, Product)
assert products == product_list
# Raise an error if a requested id has no related database object
nonexistent_item = Mock(type="Product", pk=-1)
nonexistent_item_global_id = to_global_id(
nonexistent_item.type, nonexistent_item.pk
)
global_ids.append(nonexistent_item_global_id)
msg = "There is no node of type {} with pk {}".format(
nonexistent_item.type, nonexistent_item.pk
)
with pytest.raises(AssertionError) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
global_ids.pop()
# Raise an error if one of the nodes is of the wrong type
invalid_item = Mock(type="test", pk=-1)
invalid_item_global_id = to_global_id(invalid_item.type, invalid_item.pk)
global_ids.append(invalid_item_global_id)
with pytest.raises(GraphQLError) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (f"Must receive Product id: {invalid_item_global_id}.",)
# Raise an error if no nodes were found
global_ids = []
msg = f"Could not resolve to a node with the global id list of '{global_ids}'."
with pytest.raises(Exception) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
# Raise an error if malformed ids are passed
global_ids = ["a", "bb"]
msg = f"Could not resolve to a node with the global id list of '{global_ids}'."
with pytest.raises(Exception) as exc:
get_nodes(global_ids, Product)
assert exc.value.args == (msg,)
@patch("saleor.product.models.Product.objects")
def test_filter_by_query_param(qs):
qs.filter.return_value = qs
qs = filter_by_query_param(qs, "test", ["name", "force"])
test_kwargs = {"name__icontains": "test", "force__icontains": "test"}
q_objects = Q()
for q in test_kwargs:
q_objects |= Q(**{q: test_kwargs[q]})
# FIXME: django 1.11 fails on called_once_with(q_objects)
assert qs.filter.call_count == 1
def test_from_global_id_or_error(product):
invalid_id = "invalid"
message = f"Couldn't resolve id: {invalid_id}."
with pytest.raises(GraphQLError) as error:
from_global_id_or_error(invalid_id)
assert str(error.value) == message
def test_from_global_id_or_error_wth_invalid_type(product):
product_id = graphene.Node.to_global_id("Product", product.id)
message = "Must receive a ProductVariant id."
with pytest.raises(GraphQLError) as error:
from_global_id_or_error(product_id, "ProductVariant", raise_error=True)
assert str(error.value) == message
def test_from_global_id_or_error_wth_type(product):
expected_product_type = str(Product)
expected_product_id = graphene.Node.to_global_id(expected_product_type, product.id)
product_type, product_id = from_global_id_or_error(
expected_product_id, expected_product_type
)
assert product_id == str(product.id)
assert product_type == expected_product_type | }
}
} |
data_options.py | import string
from itertools import chain
from math import ceil, floor
from random import randint, choice, random, choices, shuffle
male_first_names = (
"Liam", "Noah", "Oliver", "Elijah", "William", "James", "Benjamin", "Lucas",
"Henry", "Alexander", "Mason", "Michael", "Ethan", "Daniel", "Jacob",
"Logan", "Jackson", "Levi", "Sebastian", "Mateo", "Jack", "Owen",
"Theodore", "Aiden", "Samuel", "Joseph", "John", "David", "Wyatt",
"Matthew", "Luke", "Asher", "Carter", "Julian", "Grayson", "Leo", "Jayden",
"Gabriel", "Isaac", "Lincoln", "Anthony", "Hudson", "Dylan", "Ezra",
"Thomas", "Charles", "Christopher", "Jaxon", "Maverick", "Josiah", "Isaiah",
"Andrew", "Elias", "Joshua", "Nathan", "Caleb", "Ryan", "Adrian", "Miles",
"Eli", "Nolan", "Christian", "Aaron", "Cameron", "Ezekiel", "Colton",
"Luca", "Landon", "Hunter", "Jonathan", "Santiago", "Axel", "Easton",
"Cooper", "Jeremiah", "Angel", "Roman", "Connor", "Jameson", "Robert",
"Greyson", "Jordan", "Ian", "Carson", "Jaxson", "Leonardo", "Nicholas",
"Dominic", "Austin", "Everett", "Brooks", "Xavier", "Kai", "Jose", "Parker",
"Adam", "Jace", "Wesley", "Kayden", "Silas", "Bennett", "Declan", "Waylon",
"Weston", "Evan", "Emmett", "Micah", "Ryder", "Beau", "Damian", "Brayden",
"Gael", "Rowan", "Harrison", "Bryson", "Sawyer", "Amir", "Kingston",
"Jason", "Giovanni", "Vincent", "Ayden", "Chase", "Myles", "Diego",
"Nathaniel", "Legend", "Jonah", "River", "Tyler", "Cole", "Braxton",
"George", "Milo", "Zachary", "Ashton", "Luis", "Jasper", "Kaiden", "Adriel",
"Gavin", "Bentley", "Calvin", "Zion", "Juan", "Maxwell", "Max", "Ryker",
"Carlos", "Emmanuel", "Jayce", "Lorenzo", "Ivan", "Jude", "August", "Kevin",
"Malachi", "Elliott", "Rhett", "Archer", "Karter", "Arthur", "Luka",
"Elliot", "Thiago", "Brandon", "Camden", "Justin", "Jesus", "Maddox",
"King", "Theo", "Enzo", "Matteo", "Emiliano", "Dean", "Hayden", "Finn",
"Brody", "Antonio", "Abel", "Alex", "Tristan", "Graham", "Zayden", "Judah",
"Xander", "Miguel", "Atlas", "Messiah", "Barrett", "Tucker", "Timothy",
"Alan", "Edward", "Leon", "Dawson", "Eric", "Ace", "Victor", "Abraham",
"Nicolas", "Jesse", "Charlie", "Patrick", "Walker", "Joel", "Richard",
"Beckett", "Blake", "Alejandro", "Avery", "Grant", "Peter", "Oscar",
"Matias", "Amari", "Lukas", "Andres", "Arlo", "Colt", "Adonis", "Kyrie",
"Steven", "Felix", "Preston", "Marcus", "Holden", "Emilio", "Remington",
"Jeremy", "Kaleb", "Brantley", "Bryce", "Mark", "Knox", "Israel", "Phoenix",
"Kobe", "Nash", "Griffin", "Caden", "Kenneth", "Kyler", "Hayes", "Jax",
"Rafael", "Beckham", "Javier", "Maximus", "Simon", "Paul", "Omar", "Kaden",
"Kash", "Lane", "Bryan", "Riley", "Zane", "Louis", "Aidan", "Paxton",
"Maximiliano", "Karson", "Cash", "Cayden", "Emerson", "Tobias", "Ronan",
"Brian", "Dallas", "Bradley", "Jorge", "Walter", "Josue", "Khalil",
"Damien", "Jett", "Kairo", "Zander", "Andre", "Cohen", "Crew", "Hendrix",
"Colin", "Chance", "Malakai", "Clayton", "Daxton", "Malcolm", "Lennox",
"Martin", "Jaden", "Kayson", "Bodhi", "Francisco", "Cody", "Erick",
"Kameron", "Atticus", "Dante", "Jensen", "Cruz", "Finley", "Brady",
"Joaquin", "Anderson", "Gunner", "Muhammad", "Zayn", "Derek", "Raymond",
"Kyle", "Angelo", "Reid", "Spencer", "Nico", "Jaylen", "Jake", "Prince",
"Manuel", "Ali", "Gideon", "Stephen", "Ellis", "Orion", "Rylan", "Eduardo",
"Mario", "Rory", "Cristian", "Odin", "Tanner", "Julius", "Callum", "Sean",
"Kane", "Ricardo", "Travis", "Wade", "Warren", "Fernando", "Titus",
"Leonel", "Edwin", "Cairo", "Corbin", "Dakota", "Ismael", "Colson",
"Killian", "Major", "Tate", "Gianni", "Elian", "Remy", "Lawson", "Niko",
"Nasir", "Kade", "Armani", "Ezequiel", "Marshall", "Hector", "Desmond",
"Kason", "Garrett", "Jared", "Cyrus", "Russell", "Cesar", "Tyson", "Malik",
"Donovan", "Jaxton", "Cade", "Romeo", "Nehemiah", "Sergio", "Iker",
"Caiden", "Jay", "Pablo", "Devin", "Jeffrey", "Otto", "Kamari", "Ronin",
"Johnny", "Clark", "Ari", "Marco", "Edgar", "Bowen", "Jaiden", "Grady",
"Zayne", "Sullivan", "Jayceon", "Sterling", "Andy", "Conor", "Raiden",
"Royal", "Royce", "Solomon", "Trevor", "Winston", "Emanuel", "Finnegan",
"Pedro", "Luciano", "Harvey", "Franklin", "Noel", "Troy", "Princeton",
"Johnathan", "Erik", "Fabian", "Oakley", "Rhys", "Porter", "Hugo", "Frank",
"Damon", "Kendrick", "Mathias", "Milan", "Peyton", "Wilder", "Callan",
"Gregory", "Seth", "Matthias", "Briggs", "Ibrahim", "Roberto", "Conner",
"Quinn", "Kashton", "Sage", "Santino", "Kolton", "Alijah", "Dominick",
"Zyaire", "Apollo", "Kylo", "Reed", "Philip", "Kian", "Shawn", "Kaison",
"Leonidas", "Ayaan", "Lucca", "Memphis", "Ford", "Baylor", "Kyson", "Uriel",
"Allen", "Collin", "Ruben", "Archie", "Dalton", "Esteban", "Adan",
"Forrest", "Alonzo", "Isaias", "Leland", "Jase", "Dax", "Kasen", "Gage",
"Kamden", "Marcos", "Jamison", "Francis", "Hank", "Alexis", "Tripp",
"Frederick", "Jonas", "Stetson", "Cassius", "Izaiah", "Eden", "Maximilian",
"Rocco", "Tatum", "Keegan", "Aziel", "Moses", "Bruce", "Lewis", "Braylen",
"Omari", "Mack", "Augustus", "Enrique", "Armando", "Pierce", "Moises",
"Asa", "Shane", "Emmitt", "Soren", "Dorian", "Keanu", "Zaiden", "Raphael",
"Deacon", "Abdiel", "Kieran", "Phillip", "Ryland", "Zachariah", "Casey",
"Zaire", "Albert", "Baker", "Corey", "Kylan", "Denver", "Gunnar", "Jayson",
"Drew", "Callen", "Jasiah", "Drake", "Kannon", "Braylon", "Sonny", "Bo", | "Skyler", "Danny", "Roland", "Chandler", "Yusuf", "Samson", "Case", "Zain",
"Roy", "Rodrigo", "Sutton", "Boone", "Saint", "Saul", "Jaziel", "Hezekiah",
"Alec", "Arturo", "Jamari", "Jaxtyn", "Julien", "Koa", "Reece", "Landen",
"Koda", "Darius", "Sylas", "Ares", "Kyree", "Boston", "Keith", "Taylor",
"Johan", "Edison", "Sincere", "Watson", "Jerry", "Nikolas", "Quincy",
"Shepherd", "Brycen", "Marvin", "Dariel", "Axton", "Donald", "Bodie",
"Finnley", "Onyx", "Rayan", "Raylan", "Brixton", "Colby", "Shiloh",
"Valentino", "Layton", "Trenton", "Landyn", "Alessandro", "Ahmad",
"Gustavo", "Ledger", "Ridge", "Ander", "Ahmed", "Kingsley", "Issac",
"Mauricio", "Tony", "Leonard", "Mohammed", "Uriah", "Duke", "Kareem",
"Lucian", "Marcelo", "Aarav", "Leandro", "Reign", "Clay", "Kohen", "Dennis",
"Samir", "Ermias", "Otis", "Emir", "Nixon", "Ty", "Sam", "Fletcher",
"Wilson", "Dustin", "Hamza", "Bryant", "Flynn", "Lionel", "Mohammad",
"Cason", "Jamir", "Aden", "Dakari", "Justice", "Dillon", "Layne", "Zaid",
"Alden", "Nelson", "Devon", "Titan", "Chris", "Khari", "Zeke", "Noe",
"Alberto", "Roger", "Brock", "Rex", "Quinton", "Alvin", "Cullen", "Azariah",
"Harlan", "Kellan", "Lennon", "Marcel", "Keaton", "Morgan", "Ricky", "Trey",
"Karsyn", "Langston", "Miller", "Chaim", "Salvador", "Amias", "Tadeo",
"Curtis", "Lachlan", "Amos", "Anakin", "Krew", "Tomas", "Jefferson",
"Yosef", "Bruno", "Korbin", "Augustine", "Cayson", "Mathew", "Vihaan",
"Jamie", "Clyde", "Brendan", "Jagger", "Carmelo", "Harry", "Nathanael",
"Mitchell", "Darren", "Ray", "Jedidiah", "Jimmy", "Lochlan", "Bellamy",
"Eddie", "Rayden", "Reese", "Stanley", "Joe", "Houston", "Douglas",
"Vincenzo", "Casen", "Emery", "Joziah", "Leighton", "Marcellus", "Atreus",
"Aron", "Hugh", "Musa", "Tommy", "Alfredo", "Junior", "Neil", "Westley",
"Banks", "Eliel", "Melvin", "Maximo", "Briar", "Colten", "Lance", "Nova",
"Trace", "Axl", "Ramon", "Vicente", "Brennan", "Caspian", "Remi", "Deandre",
"Legacy", "Lee", "Valentin", "Ben", "Louie", "Westin", "Wayne", "Benicio",
"Grey", "Zayd", "Gatlin", "Mekhi", "Orlando", "Bjorn", "Harley", "Alonso",
"Rio", "Aldo", "Byron", "Eliseo", "Ernesto", "Talon", "Thaddeus", "Brecken",
"Kace", "Kellen", "Enoch", "Kiaan", "Lian", "Creed", "Rohan", "Callahan",
"Jaxxon", "Ocean", "Crosby", "Dash", "Gary", "Mylo", "Ira", "Magnus",
"Salem", "Abdullah", "Kye", "Tru", "Forest", "Jon", "Misael", "Madden",
"Braden", "Carl", "Hassan", "Emory", "Kristian", "Alaric", "Ambrose",
"Dario", "Allan", "Bode", "Boden", "Juelz", "Kristopher", "Genesis",
"Idris", "Ameer", "Anders", "Darian", "Kase", "Aryan", "Dane", "Guillermo",
"Elisha", "Jakobe", "Thatcher", "Eugene", "Ishaan", "Larry", "Wesson",
"Yehuda", "Alvaro", "Bobby", "Bronson", "Dilan", "Kole", "Kyro", "Tristen",
"Blaze", "Brayan", "Jadiel", "Kamryn", "Demetrius", "Maurice", "Arian",
"Kabir", "Rocky", "Rudy", "Randy", "Rodney", "Yousef", "Felipe", "Robin",
"Aydin", "Dior", "Kaiser", "Van", "Brodie", "London", "Eithan", "Stefan",
"Ulises", "Camilo", "Branson", "Jakari", "Judson", "Yahir", "Zavier",
"Damari", "Jakob", "Jaxx", "Bentlee", "Cain", "Niklaus", "Rey", "Zahir",
"Aries", "Blaine", "Kyng", "Castiel", "Henrik", "Joey", "Khalid", "Bear",
"Graysen", "Jair", "Kylen", "Darwin", "Alfred", "Ayan", "Kenji", "Zakai",
"Avi", "Cory", "Fisher", "Jacoby", "Osiris", "Harlem", "Jamal", "Santos",
"Wallace", "Brett", "Fox", "Leif", "Maison", "Reuben", "Adler", "Zev",
"Calum", "Kelvin", "Zechariah", "Bridger", "Mccoy", "Seven", "Shepard",
"Azrael", "Leroy", "Terry", "Harold", "Mac", "Mordechai", "Ahmir", "Cal",
"Franco", "Trent", "Blaise", "Coen", "Dominik", "Marley", "Davion",
"Jeremias", "Riggs", "Jones", "Will", "Damir", "Dangelo", "Canaan", "Dion",
"Jabari", "Landry", "Salvatore", "Kody", "Hakeem", "Truett", "Gerald",
"Lyric", "Gordon", "Jovanni", "Kamdyn", "Alistair", "Cillian", "Foster",
"Terrance", "Murphy", "Zyair", "Cedric", "Rome", "Abner", "Colter",
"Dayton", "Jad", "Xzavier", "Rene", "Vance", "Duncan", "Frankie", "Bishop",
"Davian", "Everest", "Heath", "Jaxen", "Marlon", "Maxton", "Reginald",
"Harris", "Jericho", "Keenan", "Korbyn", "Wes", "Eliezer", "Jeffery",
"Kalel", "Kylian", "Turner", "Willie", "Rogelio", "Ephraim",
)
female_first_names = (
"Olivia", "Emma", "Ava", "Charlotte", "Sophia", "Amelia", "Isabella", "Mia",
"Evelyn", "Harper", "Camila", "Gianna", "Abigail", "Luna", "Ella",
"Elizabeth", "Sofia", "Emily", "Avery", "Mila", "Scarlett", "Eleanor",
"Madison", "Layla", "Penelope", "Aria", "Chloe", "Grace", "Ellie", "Nora",
"Hazel", "Zoey", "Riley", "Victoria", "Lily", "Aurora", "Violet", "Nova",
"Hannah", "Emilia", "Zoe", "Stella", "Everly", "Isla", "Leah", "Lillian",
"Addison", "Willow", "Lucy", "Paisley", "Natalie", "Naomi", "Eliana",
"Brooklyn", "Elena", "Aubrey", "Claire", "Ivy", "Kinsley", "Audrey", "Maya",
"Genesis", "Skylar", "Bella", "Aaliyah", "Madelyn", "Savannah", "Anna",
"Delilah", "Serenity", "Caroline", "Kennedy", "Valentina", "Ruby", "Sophie",
"Alice", "Gabriella", "Sadie", "Ariana", "Allison", "Hailey", "Autumn",
"Nevaeh", "Natalia", "Quinn", "Josephine", "Sarah", "Cora", "Emery",
"Samantha", "Piper", "Leilani", "Eva", "Everleigh", "Madeline", "Lydia",
"Jade", "Peyton", "Brielle", "Adeline", "Vivian", "Rylee", "Clara",
"Raelynn", "Melanie", "Melody", "Julia", "Athena", "Maria", "Liliana",
"Hadley", "Arya", "Rose", "Reagan", "Eliza", "Adalynn", "Kaylee", "Lyla",
"Mackenzie", "Alaia", "Isabelle", "Charlie", "Arianna", "Mary", "Remi",
"Margaret", "Iris", "Parker", "Ximena", "Eden", "Ayla", "Kylie", "Elliana",
"Josie", "Katherine", "Faith", "Alexandra", "Eloise", "Adalyn", "Amaya",
"Jasmine", "Amara", "Daisy", "Reese", "Valerie", "Brianna", "Cecilia",
"Andrea", "Summer", "Valeria", "Norah", "Ariella", "Esther", "Ashley",
"Emerson", "Aubree", "Isabel", "Anastasia", "Ryleigh", "Khloe", "Taylor",
"Londyn", "Lucia", "Emersyn", "Callie", "Sienna", "Blakely", "Kehlani",
"Genevieve", "Alina", "Bailey", "Juniper", "Maeve", "Molly", "Harmony",
"Georgia", "Magnolia", "Catalina", "Freya", "Juliette", "Sloane", "June",
"Sara", "Ada", "Kimberly", "River", "Ember", "Juliana", "Aliyah", "Millie",
"Brynlee", "Teagan", "Morgan", "Jordyn", "London", "Alaina", "Olive",
"Rosalie", "Alyssa", "Ariel", "Finley", "Arabella", "Journee", "Hope",
"Leila", "Alana", "Gemma", "Vanessa", "Gracie", "Noelle", "Marley", "Elise",
"Presley", "Kamila", "Zara", "Amy", "Kayla", "Payton", "Blake", "Ruth",
"Alani", "Annabelle", "Sage", "Aspen", "Laila", "Lila", "Rachel", "Trinity",
"Daniela", "Alexa", "Lilly", "Lauren", "Elsie", "Margot", "Adelyn", "Zuri",
"Brooke", "Sawyer", "Lilah", "Lola", "Selena", "Mya", "Sydney", "Diana",
"Ana", "Vera", "Alayna", "Nyla", "Elaina", "Rebecca", "Angela", "Kali",
"Alivia", "Raegan", "Rowan", "Phoebe", "Camilla", "Joanna", "Malia",
"Vivienne", "Dakota", "Brooklynn", "Evangeline", "Camille", "Jane",
"Nicole", "Catherine", "Jocelyn", "Julianna", "Lena", "Lucille", "Mckenna",
"Paige", "Adelaide", "Charlee", "Mariana", "Myla", "Mckenzie", "Tessa",
"Miriam", "Oakley", "Kailani", "Alayah", "Amira", "Adaline", "Phoenix",
"Milani", "Annie", "Lia", "Angelina", "Harley", "Cali", "Maggie", "Hayden",
"Leia", "Fiona", "Briella", "Journey", "Lennon", "Saylor", "Jayla", "Kaia",
"Thea", "Adriana", "Mariah", "Juliet", "Oaklynn", "Kiara", "Alexis",
"Haven", "Aniyah", "Delaney", "Gracelynn", "Kendall", "Winter", "Lilith",
"Logan", "Amiyah", "Evie", "Alexandria", "Gracelyn", "Gabriela", "Sutton",
"Harlow", "Madilyn", "Makayla", "Evelynn", "Gia", "Nina", "Amina",
"Giselle", "Brynn", "Blair", "Amari", "Octavia", "Michelle", "Talia",
"Demi", "Alaya", "Kaylani", "Izabella", "Fatima", "Tatum", "Makenzie",
"Lilliana", "Arielle", "Palmer", "Melissa", "Willa", "Samara", "Destiny",
"Dahlia", "Celeste", "Ainsley", "Rylie", "Reign", "Laura", "Adelynn",
"Gabrielle", "Remington", "Wren", "Brinley", "Amora", "Lainey", "Collins",
"Lexi", "Aitana", "Alessandra", "Kenzie", "Raelyn", "Elle", "Everlee",
"Haisley", "Hallie", "Wynter", "Daleyza", "Gwendolyn", "Paislee", "Ariyah",
"Veronica", "Heidi", "Anaya", "Cataleya", "Kira", "Avianna", "Felicity",
"Aylin", "Miracle", "Sabrina", "Lana", "Ophelia", "Elianna", "Royalty",
"Madeleine", "Esmeralda", "Joy", "Kalani", "Esme", "Jessica", "Leighton",
"Ariah", "Makenna", "Nylah", "Viviana", "Camryn", "Cassidy", "Dream",
"Luciana", "Maisie", "Stevie", "Kate", "Lyric", "Daniella", "Alicia",
"Daphne", "Frances", "Charli", "Raven", "Paris", "Nayeli", "Serena",
"Heaven", "Bianca", "Helen", "Hattie", "Averie", "Mabel", "Selah", "Allie",
"Marlee", "Kinley", "Regina", "Carmen", "Jennifer", "Jordan", "Alison",
"Stephanie", "Maren", "Kayleigh", "Angel", "Annalise", "Jacqueline",
"Braelynn", "Emory", "Rosemary", "Scarlet", "Amanda", "Danielle", "Emelia",
"Ryan", "Carolina", "Astrid", "Kensley", "Shiloh", "Maci", "Francesca",
"Rory", "Celine", "Kamryn", "Zariah", "Liana", "Poppy", "Maliyah", "Keira",
"Skyler", "Noa", "Skye", "Nadia", "Addilyn", "Rosie", "Eve", "Sarai",
"Edith", "Jolene", "Maddison", "Meadow", "Charleigh", "Matilda", "Elliott",
"Madelynn", "Holly", "Leona", "Azalea", "Katie", "Mira", "Ari", "Kaitlyn",
"Danna", "Cameron", "Kyla", "Bristol", "Kora", "Armani", "Nia", "Malani",
"Dylan", "Remy", "Maia", "Dior", "Legacy", "Alessia", "Shelby", "Maryam",
"Sylvia", "Yaretzi", "Lorelei", "Madilynn", "Abby", "Helena", "Jimena",
"Elisa", "Renata", "Amber", "Aviana", "Carter", "Emmy", "Haley", "Alondra",
"Elaine", "Erin", "April", "Emely", "Imani", "Kennedi", "Lorelai", "Hanna",
"Kelsey", "Aurelia", "Colette", "Jaliyah", "Kylee", "Macie", "Aisha",
"Dorothy", "Charley", "Kathryn", "Adelina", "Adley", "Monroe", "Sierra",
"Ailani", "Miranda", "Mikayla", "Alejandra", "Amirah", "Jada", "Jazlyn",
"Jenna", "Jayleen", "Beatrice", "Kendra", "Lyra", "Nola", "Emberly",
"Mckinley", "Myra", "Katalina", "Antonella", "Zelda", "Alanna", "Amaia",
"Priscilla", "Briar", "Kaliyah", "Itzel", "Oaklyn", "Alma", "Mallory",
"Novah", "Amalia", "Fernanda", "Alia", "Angelica", "Elliot", "Justice",
"Mae", "Cecelia", "Gloria", "Ariya", "Virginia", "Cheyenne", "Aleah",
"Jemma", "Henley", "Meredith", "Leyla", "Lennox", "Ensley", "Zahra",
"Reina", "Frankie", "Lylah", "Nalani", "Reyna", "Saige", "Ivanna", "Aleena",
"Emerie", "Ivory", "Leslie", "Alora", "Ashlyn", "Bethany", "Bonnie",
"Sasha", "Xiomara", "Salem", "Adrianna", "Dayana", "Clementine", "Karina",
"Karsyn", "Emmie", "Julie", "Julieta", "Briana", "Carly", "Macy", "Marie",
"Oaklee", "Christina", "Malaysia", "Ellis", "Irene", "Anne", "Anahi",
"Mara", "Rhea", "Davina", "Dallas", "Jayda", "Mariam", "Skyla", "Siena",
"Elora", "Marilyn", "Jazmin", "Megan", "Rosa", "Savanna", "Allyson",
"Milan", "Coraline", "Johanna", "Melany", "Chelsea", "Michaela", "Melina",
"Angie", "Cassandra", "Yara", "Kassidy", "Liberty", "Lilian", "Avah",
"Anya", "Laney", "Navy", "Opal", "Amani", "Zaylee", "Mina", "Sloan",
"Romina", "Ashlynn", "Aliza", "Liv", "Malaya", "Blaire", "Janelle", "Kara",
"Analia", "Hadassah", "Hayley", "Karla", "Chaya", "Cadence", "Kyra",
"Alena", "Ellianna", "Katelyn", "Kimber", "Laurel", "Lina", "Capri",
"Braelyn", "Faye", "Kamiyah", "Kenna", "Louise", "Calliope", "Kaydence",
"Nala", "Tiana", "Aileen", "Sunny", "Zariyah", "Milana", "Giuliana",
"Eileen", "Elodie", "Rayna", "Monica", "Galilea", "Journi", "Lara",
"Marina", "Aliana", "Harmoni", "Jamie", "Holland", "Emmalyn", "Lauryn",
"Chanel", "Tinsley", "Jessie", "Lacey", "Elyse", "Janiyah", "Jolie", "Ezra",
"Marleigh", "Roselyn", "Lillie", "Louisa", "Madisyn", "Penny", "Kinslee",
"Treasure", "Zaniyah", "Estella", "Jaylah", "Khaleesi", "Alexia", "Dulce",
"Indie", "Maxine", "Waverly", "Giovanna", "Miley", "Saoirse", "Estrella",
"Greta", "Rosalia", "Mylah", "Teresa", "Bridget", "Kelly", "Adalee",
"Aubrie", "Lea", "Harlee", "Anika", "Itzayana", "Hana", "Kaisley",
"Mikaela", "Naya", "Avalynn", "Margo", "Sevyn", "Florence", "Keilani",
"Lyanna", "Joelle", "Kataleya", "Royal", "Averi", "Kallie", "Winnie",
"Baylee", "Martha", "Pearl", "Alaiya", "Rayne", "Sylvie", "Brylee",
"Jazmine", "Ryann", "Kori", "Noemi", "Haylee", "Julissa", "Celia", "Laylah",
"Rebekah", "Rosalee", "Aya", "Bria", "Adele", "Aubrielle", "Tiffany",
"Addyson", "Kai", "Bellamy", "Leilany", "Princess", "Chana", "Estelle",
"Selene", "Sky", "Dani", "Thalia", "Ellen", "Rivka", "Amelie", "Andi",
"Kynlee", "Raina", "Vienna", "Alianna", "Livia", "Madalyn", "Mercy",
"Novalee", "Ramona", "Vada", "Berkley", "Gwen", "Persephone", "Milena",
"Paula", "Clare", "Kairi", "Linda", "Paulina", "Kamilah", "Amoura",
"Hunter", "Isabela", "Karen", "Marianna", "Sariyah", "Theodora", "Annika",
"Kyleigh", "Nellie", "Scarlette", "Keyla", "Kailey", "Mavis", "Lilianna",
"Rosalyn", "Sariah", "Tori", "Yareli", "Aubriella", "Bexley", "Bailee",
"Jianna", "Keily", "Annabella", "Azariah", "Denisse", "Promise", "August",
"Hadlee", "Halle", "Fallon", "Oakleigh", "Zaria", "Jaylin", "Paisleigh",
"Crystal", "Ila", "Aliya", "Cynthia", "Giana", "Maleah", "Rylan", "Aniya",
"Denise", "Emmeline", "Scout", "Simone", "Noah", "Zora", "Meghan", "Landry",
"Ainhoa", "Lilyana", "Noor", "Belen", "Brynleigh", "Cleo", "Meilani",
"Karter", "Amaris", "Frida", "Iliana", "Violeta", "Addisyn", "Nancy",
"Denver", "Leanna", "Braylee", "Kiana", "Wrenley", "Barbara", "Khalani",
"Aspyn", "Ellison", "Judith", "Robin", "Valery", "Aila", "Deborah", "Cara",
"Clarissa", "Iyla", "Lexie", "Anais", "Kaylie", "Nathalie", "Alisson",
"Della", "Addilynn", "Elsa", "Zoya", "Layne", "Marlowe", "Jovie", "Kenia",
"Samira", "Jaylee", "Jenesis", "Etta", "Shay", "Amayah", "Avayah", "Egypt",
"Flora", "Raquel", "Whitney", "Zola", "Giavanna", "Raya", "Veda", "Halo",
"Paloma", "Nataly", "Whitley", "Dalary", "Drew", "Guadalupe", "Kamari",
"Esperanza", "Loretta", "Malayah", "Natasha", "Stormi", "Ansley", "Carolyn",
"Corinne", "Paola", "Brittany", "Emerald", "Freyja", "Zainab", "Artemis",
"Jillian", "Kimora", "Zoie", "Aislinn", "Emmaline", "Ayleen", "Queen",
"Jaycee", "Murphy", "Nyomi", "Elina", "Hadleigh", "Marceline", "Marisol",
"Yasmin", "Zendaya", "Chandler", "Emani", "Jaelynn", "Kaiya", "Nathalia",
"Violette", "Joyce", "Paityn", "Elisabeth", "Emmalynn", "Luella",
"Yamileth", "Aarya", "Luisa", "Zhuri", "Araceli", "Harleigh", "Madalynn",
"Melani", "Laylani", "Magdalena", "Mazikeen", "Belle", "Kadence",
)
last_names = (
"Smith", "Johnson", "Williams", "Brown", "Jones", "Garcia", "Miller",
"Davis", "Rodriguez", "Martinez", "Hernandez", "Lopez", "Gonzales",
"Wilson", "Anderson", "Thomas", "Taylor", "Moore", "Jackson", "Martin",
"Lee", "Perez", "Thompson", "White", "Harris", "Sanchez", "Clark",
"Ramirez", "Lewis", "Robinson", "Walker", "Young", "Allen", "King",
"Wright", "Scott", "Torres", "Nguyen", "Hill", "Flores", "Green", "Adams",
"Nelson", "Baker", "Hall", "Rivera", "Campbell", "Mitchell", "Carter",
"Roberts", "Gomez", "Phillips", "Evans", "Turner", "Diaz", "Parker", "Cruz",
"Edwards", "Collins", "Reyes", "Stewart", "Morris", "Morales", "Murphy",
"Cook", "Rogers", "Gutierrez", "Ortiz", "Morgan", "Cooper", "Peterson",
"Bailey", "Reed", "Kelly", "Howard", "Ramos", "Kim", "Cox", "Ward",
"Richardson", "Watson", "Brooks", "Chavez", "Wood", "James", "Bennet",
"Gray", "Mendoza", "Ruiz", "Hughes", "Price", "Alvarez", "Castillo",
"Sanders", "Patel", "Myers", "Long", "Ross", "Foster", "Jimenez",
)
skill_levels = (
"Beginner", "Intermediate", "Advanced", "Expert",
)
subjects = (
"Web: HTML, CSS, JavaScript", "Data Science: Python",
"Android: Java", "iOS: Swift", "Career Development",
"General Programming",
)
resource_items = (
    "Laptop", "Books", "Scholarships",
    "Mental Health Need", "Financial stipends",
)
disability = (True, False)
work_status = (True, False)
receiving_assistance = (True, False)
convictions = (
"Felony", "Misdemeanor", "Infraction",
)
feedbacks = (
"Not Recommended, Poor", "Conflicted, Fair", "Recommended, Good",
"Highly Recommended, Very Good", "Best, Excellent",
)
topics = (
    "GCA Help", "Resume Help", "Job Search", "Progress Check",
)
def random_first_name(percent_male: int = 50):
    """Return a random first name; `percent_male` is the chance (0-100) of a male name."""
    if randint(1, 100) > percent_male:
        return choice(female_first_names)
    else:
        return choice(male_first_names)
def percent_true(percent):
    """Return True with probability `percent`/100: random() is uniform on [0, 1)."""
    return 100 * random() < percent
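# Hedged sketch of how the helpers above combine into one fake record.
# `_demo_profile` and its field choices are illustrative additions, not part
# of the original module; it reuses the module's existing `random` imports
# (choice, randint, random) and the tuples defined earlier in this file.
def _demo_profile():
    return {
        "first_name": random_first_name(percent_male=50),
        "last_name": choice(last_names),
        "receiving_assistance": percent_true(25),  # True ~25% of the time
    }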
def generate_uuid(n_len: int):
    """Return a shuffled mix of letters and digits, `n_len` characters long.

    Despite the name, this is a simple random token (e.g. generate_uuid(8)
    might yield "a3X0k7Q9"), not an RFC 4122 UUID.
    """
    n1 = ceil(n_len / 2)
    n2 = floor(n_len / 2)
    prefix = choices(string.ascii_letters, k=n1)
    # Digits 0-9; range() excludes its stop value, so the bound must be 10.
    suffix = map(str, choices(range(0, 10), k=n2))
    uuid_list = list(chain(prefix, suffix))
    shuffle(uuid_list)
    uuid = "".join(uuid_list)
    return uuid | "Moshe", "Huxley", "Quentin", "Rowen", "Santana", "Cannon", "Kenzo",
"Wells", "Julio", "Nikolai", "Conrad", "Jalen", "Makai", "Benson",
"Derrick", "Gerardo", "Davis", "Abram", "Mohamed", "Ronald", "Raul",
"Arjun", "Dexter", "Kaysen", "Jaime", "Scott", "Lawrence", "Ariel", |
create.go | package cmd
import (
"github.com/awesomenix/azk/cmd/addons"
"github.com/awesomenix/azk/cmd/cluster"
"github.com/awesomenix/azk/cmd/controlplane"
"github.com/awesomenix/azk/cmd/flow"
"github.com/awesomenix/azk/cmd/nodepool"
"github.com/spf13/cobra"
)
// CreateCmd is the parent "azk create" command; the concrete create
// operations are attached as subcommands in init below.
var CreateCmd = &cobra.Command{
	Use: "create",
}
func init() | {
RootCmd.AddCommand(CreateCmd)
CreateCmd.AddCommand(cluster.CreateClusterCmd)
CreateCmd.AddCommand(controlplane.CreateControlPlaneCmd)
CreateCmd.AddCommand(nodepool.CreateNodepoolCmd)
CreateCmd.AddCommand(flow.FlowCmd)
CreateCmd.AddCommand(addons.CreateAddonsCmd)
} |
|
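// Hedged sketch of how this command tree is typically executed from a main
// package (the main function below follows the usual cobra pattern and is an
// assumption, not code from this repository):
//
//	func main() {
//		if err := cmd.RootCmd.Execute(); err != nil {
//			os.Exit(1)
//		}
//	}
//
// which would expose: azk create {cluster, controlplane, nodepool, flow, addons}.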
add_thesaurus_for_api.go | package qualitycheck
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
)
// AddThesaurusForApi invokes the qualitycheck.AddThesaurusForApi API synchronously
func (client *Client) AddThesaurusForApi(request *AddThesaurusForApiRequest) (response *AddThesaurusForApiResponse, err error) {
response = CreateAddThesaurusForApiResponse()
err = client.DoAction(request, response)
return
}
// AddThesaurusForApiWithChan invokes the qualitycheck.AddThesaurusForApi API asynchronously
func (client *Client) AddThesaurusForApiWithChan(request *AddThesaurusForApiRequest) (<-chan *AddThesaurusForApiResponse, <-chan error) {
responseChan := make(chan *AddThesaurusForApiResponse, 1)
errChan := make(chan error, 1)
err := client.AddAsyncTask(func() {
defer close(responseChan)
defer close(errChan)
response, err := client.AddThesaurusForApi(request)
if err != nil {
errChan <- err
} else {
responseChan <- response
}
})
if err != nil {
errChan <- err
close(responseChan)
close(errChan)
}
return responseChan, errChan
}
// AddThesaurusForApiWithCallback invokes the qualitycheck.AddThesaurusForApi API asynchronously
func (client *Client) AddThesaurusForApiWithCallback(request *AddThesaurusForApiRequest, callback func(response *AddThesaurusForApiResponse, err error)) <-chan int {
result := make(chan int, 1)
err := client.AddAsyncTask(func() {
var response *AddThesaurusForApiResponse
var err error
defer close(result)
response, err = client.AddThesaurusForApi(request)
callback(response, err)
result <- 1
})
if err != nil {
defer close(result)
callback(nil, err)
result <- 0
}
return result
}
// AddThesaurusForApiRequest is the request struct for api AddThesaurusForApi
type AddThesaurusForApiRequest struct {
*requests.RpcRequest
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
JsonStr string `position:"Query" name:"JsonStr"`
}
// AddThesaurusForApiResponse is the response struct for api AddThesaurusForApi
type AddThesaurusForApiResponse struct {
*responses.BaseResponse
Code string `json:"Code" xml:"Code"`
Message string `json:"Message" xml:"Message"`
Data int64 `json:"Data" xml:"Data"`
RequestId string `json:"RequestId" xml:"RequestId"`
Success bool `json:"Success" xml:"Success"`
}
// CreateAddThesaurusForApiRequest creates a request to invoke AddThesaurusForApi API
func | () (request *AddThesaurusForApiRequest) {
request = &AddThesaurusForApiRequest{
RpcRequest: &requests.RpcRequest{},
}
request.InitWithApiInfo("Qualitycheck", "2019-01-15", "AddThesaurusForApi", "", "")
request.Method = requests.POST
return
}
// CreateAddThesaurusForApiResponse creates a response to parse from AddThesaurusForApi response
func CreateAddThesaurusForApiResponse() (response *AddThesaurusForApiResponse) {
response = &AddThesaurusForApiResponse{
BaseResponse: &responses.BaseResponse{},
}
return
}
| CreateAddThesaurusForApiRequest |
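// Usage sketch (illustrative, not from the SDK docs): invoking the API through
// the channel-based variant defined above. Client construction and the JsonStr
// payload are assumptions; the response fields match the struct above.
//
//	request := qualitycheck.CreateAddThesaurusForApiRequest()
//	request.JsonStr = `{"name": "my-thesaurus"}` // hypothetical payload
//	respChan, errChan := client.AddThesaurusForApiWithChan(request)
//	select {
//	case resp := <-respChan:
//		fmt.Println(resp.Success, resp.RequestId)
//	case err := <-errChan:
//		log.Println(err)
//	}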
jquery.signalR-2.2.2.min.js | /*!
* ASP.NET SignalR JavaScript Library v2.2.2
* http://signalr.net/
*
* Copyright (c) .NET Foundation. All rights reserved.
* Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
*
*/
(function(n,t,i){function w(t,i){var u,f;if(n.isArray(t)){for(u=t.length-1;u>=0;u--)f=t[u],n.type(f)==="string"&&r.transports[f]||(i.log("Invalid transport: "+f+", removing it from the transports list."),t.splice(u,1));t.length===0&&(i.log("No transports remain within the specified transport array."),t=null)}else if(r.transports[t]||t==="auto"){if(t==="auto"&&r._.ieVersion<=8)return["longPolling"]}else i.log("Invalid transport: "+t.toString()+"."),t=null;return t}function | (n){return n==="http:"?80:n==="https:"?443:void 0}function a(n,t){return t.match(/:\d+$/)?t:t+":"+b(n)}function k(t,i){var u=this,r=[];u.tryBuffer=function(i){return t.state===n.signalR.connectionState.connecting?(r.push(i),!0):!1};u.drain=function(){if(t.state===n.signalR.connectionState.connected)while(r.length>0)i(r.shift())};u.clear=function(){r=[]}}var f={nojQuery:"jQuery was not found. Please ensure jQuery is referenced before the SignalR client JavaScript file.",noTransportOnInit:"No transport could be initialized successfully. Try specifying a different transport or none at all for auto initialization.",errorOnNegotiate:"Error during negotiation request.",stoppedWhileLoading:"The connection was stopped during page load.",stoppedWhileNegotiating:"The connection was stopped during the negotiate request.",errorParsingNegotiateResponse:"Error parsing negotiate response.",errorDuringStartRequest:"Error during start request. Stopping the connection.",stoppedDuringStartRequest:"The connection was stopped during the start request.",errorParsingStartResponse:"Error parsing start response: '{0}'. Stopping the connection.",invalidStartResponse:"Invalid start response: '{0}'. Stopping the connection.",protocolIncompatible:"You are using a version of the client that isn't compatible with the server. Client version {0}, server version {1}.",sendFailed:"Send failed.",parseFailed:"Failed at parsing response: {0}",longPollFailed:"Long polling request failed.",eventSourceFailedToConnect:"EventSource failed to connect.",eventSourceError:"Error raised by EventSource",webSocketClosed:"WebSocket closed.",pingServerFailedInvalidResponse:"Invalid ping response when pinging server: '{0}'.",pingServerFailed:"Failed to ping server.",pingServerFailedStatusCode:"Failed to ping server. Server responded with status code {0}, stopping the connection.",pingServerFailedParse:"Failed to parse ping server response, stopping the connection.",noConnectionTransport:"Connection is in an invalid state, there is no transport active.",webSocketsInvalidState:"The Web Socket transport is in an invalid state, transitioning into reconnecting.",reconnectTimeout:"Couldn't reconnect within the configured timeout of {0} ms, disconnecting.",reconnectWindowTimeout:"The client has been inactive since {0} and it has exceeded the inactivity timeout of {1} ms. 
Stopping the connection."};if(typeof n!="function")throw new Error(f.nojQuery);var r,h,o=t.document.readyState==="complete",e=n(t),c="__Negotiate Aborted__",u={onStart:"onStart",onStarting:"onStarting",onReceived:"onReceived",onError:"onError",onConnectionSlow:"onConnectionSlow",onReconnecting:"onReconnecting",onReconnect:"onReconnect",onStateChanged:"onStateChanged",onDisconnect:"onDisconnect"},v=function(n,i){if(i!==!1){var r;typeof t.console!="undefined"&&(r="["+(new Date).toTimeString()+"] SignalR: "+n,t.console.debug?t.console.debug(r):t.console.log&&t.console.log(r))}},s=function(t,i,r){return i===t.state?(t.state=r,n(t).triggerHandler(u.onStateChanged,[{oldState:i,newState:r}]),!0):!1},y=function(n){return n.state===r.connectionState.disconnected},l=function(n){return n._.keepAliveData.activated&&n.transport.supportsKeepAlive(n)},p=function(i){var f,e;i._.configuredStopReconnectingTimeout||(e=function(t){var i=r._.format(r.resources.reconnectTimeout,t.disconnectTimeout);t.log(i);n(t).triggerHandler(u.onError,[r._.error(i,"TimeoutException")]);t.stop(!1,!1)},i.reconnecting(function(){var n=this;n.state===r.connectionState.reconnecting&&(f=t.setTimeout(function(){e(n)},n.disconnectTimeout))}),i.stateChanged(function(n){n.oldState===r.connectionState.reconnecting&&t.clearTimeout(f)}),i._.configuredStopReconnectingTimeout=!0)};if(r=function(n,t,i){return new r.fn.init(n,t,i)},r._={defaultContentType:"application/x-www-form-urlencoded; charset=UTF-8",ieVersion:function(){var i,n;return t.navigator.appName==="Microsoft Internet Explorer"&&(n=/MSIE ([0-9]+\.[0-9]+)/.exec(t.navigator.userAgent),n&&(i=t.parseFloat(n[1]))),i}(),error:function(n,t,i){var r=new Error(n);return r.source=t,typeof i!="undefined"&&(r.context=i),r},transportError:function(n,t,r,u){var f=this.error(n,r,u);return f.transport=t?t.name:i,f},format:function(){for(var t=arguments[0],n=0;n<arguments.length-1;n++)t=t.replace("{"+n+"}",arguments[n+1]);return t},firefoxMajorVersion:function(n){var t=n.match(/Firefox\/(\d+)/);return!t||!t.length||t.length<2?0:parseInt(t[1],10)},configurePingInterval:function(i){var f=i._.config,e=function(t){n(i).triggerHandler(u.onError,[t])};f&&!i._.pingIntervalId&&f.pingInterval&&(i._.pingIntervalId=t.setInterval(function(){r.transports._logic.pingServer(i).fail(e)},f.pingInterval))}},r.events=u,r.resources=f,r.ajaxDefaults={processData:!0,timeout:null,async:!0,global:!1,cache:!1},r.changeState=s,r.isDisconnecting=y,r.connectionState={connecting:0,connected:1,reconnecting:2,disconnected:4},r.hub={start:function(){throw new Error("SignalR: Error loading hubs. Ensure your hubs reference is correct, e.g. 
<script src='/signalr/js'><\/script>.");}},typeof e.on=="function")e.on("load",function(){o=!0});else e.load(function(){o=!0});r.fn=r.prototype={init:function(t,i,r){var f=n(this);this.url=t;this.qs=i;this.lastError=null;this._={keepAliveData:{},connectingMessageBuffer:new k(this,function(n){f.triggerHandler(u.onReceived,[n])}),lastMessageAt:(new Date).getTime(),lastActiveAt:(new Date).getTime(),beatInterval:5e3,beatHandle:null,totalTransportConnectTimeout:0};typeof r=="boolean"&&(this.logging=r)},_parseResponse:function(n){var t=this;return n?typeof n=="string"?t.json.parse(n):n:n},_originalJson:t.JSON,json:t.JSON,isCrossDomain:function(i,r){var u;return(i=n.trim(i),r=r||t.location,i.indexOf("http")!==0)?!1:(u=t.document.createElement("a"),u.href=i,u.protocol+a(u.protocol,u.host)!==r.protocol+a(r.protocol,r.host))},ajaxDataType:"text",contentType:"application/json; charset=UTF-8",logging:!1,state:r.connectionState.disconnected,clientProtocol:"1.5",reconnectDelay:2e3,transportConnectTimeout:0,disconnectTimeout:3e4,reconnectWindow:3e4,keepAliveWarnAt:2/3,start:function(i,h){var a=this,v={pingInterval:3e5,waitForPageLoad:!0,transport:"auto",jsonp:!1},d,y=a._deferral||n.Deferred(),b=t.document.createElement("a"),k,g;if(a.lastError=null,a._deferral=y,!a.json)throw new Error("SignalR: No JSON parser found. Please ensure json2.js is referenced before the SignalR.js file if you need to support clients without native JSON parsing support, e.g. IE<8.");if(n.type(i)==="function"?h=i:n.type(i)==="object"&&(n.extend(v,i),n.type(v.callback)==="function"&&(h=v.callback)),v.transport=w(v.transport,a),!v.transport)throw new Error("SignalR: Invalid transport(s) specified, aborting start.");return(a._.config=v,!o&&v.waitForPageLoad===!0)?(a._.deferredStartHandler=function(){a.start(i,h)},e.bind("load",a._.deferredStartHandler),y.promise()):a.state===r.connectionState.connecting?y.promise():s(a,r.connectionState.disconnected,r.connectionState.connecting)===!1?(y.resolve(a),y.promise()):(p(a),b.href=a.url,b.protocol&&b.protocol!==":"?(a.protocol=b.protocol,a.host=b.host):(a.protocol=t.document.location.protocol,a.host=b.host||t.document.location.host),a.baseUrl=a.protocol+"//"+a.host,a.wsProtocol=a.protocol==="https:"?"wss://":"ws://",v.transport==="auto"&&v.jsonp===!0&&(v.transport="longPolling"),a.url.indexOf("//")===0&&(a.url=t.location.protocol+a.url,a.log("Protocol relative URL detected, normalizing it to '"+a.url+"'.")),this.isCrossDomain(a.url)&&(a.log("Auto detected cross domain url."),v.transport==="auto"&&(v.transport=["webSockets","serverSentEvents","longPolling"]),typeof v.withCredentials=="undefined"&&(v.withCredentials=!0),v.jsonp||(v.jsonp=!n.support.cors,v.jsonp&&a.log("Using jsonp because this browser doesn't support CORS.")),a.contentType=r._.defaultContentType),a.withCredentials=v.withCredentials,a.ajaxDataType=v.jsonp?"jsonp":"text",n(a).bind(u.onStart,function(){n.type(h)==="function"&&h.call(a);y.resolve(a)}),a._.initHandler=r.transports._logic.initHandler(a),d=function(i,o){var c=r._.error(f.noTransportOnInit);if(o=o||0,o>=i.length){o===0?a.log("No transports supported by the server were selected."):o===1?a.log("No fallback transports were selected."):a.log("Fallback transports exhausted.");n(a).triggerHandler(u.onError,[c]);y.reject(c);a.stop();return}if(a.state!==r.connectionState.disconnected){var p=i[o],h=r.transports[p],v=function(){d(i,o+1)};a.transport=h;try{a._.initHandler.start(h,function(){var 
i=r._.firefoxMajorVersion(t.navigator.userAgent)>=11,f=!!a.withCredentials&&i;a.log("The start request succeeded. Transitioning to the connected state.");l(a)&&r.transports._logic.monitorKeepAlive(a);r.transports._logic.startHeartbeat(a);r._.configurePingInterval(a);s(a,r.connectionState.connecting,r.connectionState.connected)||a.log("WARNING! The connection was not in the connecting state.");a._.connectingMessageBuffer.drain();n(a).triggerHandler(u.onStart);e.bind("unload",function(){a.log("Window unloading, stopping the connection.");a.stop(f)});i&&e.bind("beforeunload",function(){t.setTimeout(function(){a.stop(f)},0)})},v)}catch(w){a.log(h.name+" transport threw '"+w.message+"' when attempting to start.");v()}}},k=a.url+"/negotiate",g=function(t,i){var e=r._.error(f.errorOnNegotiate,t,i._.negotiateRequest);n(i).triggerHandler(u.onError,e);y.reject(e);i.stop()},n(a).triggerHandler(u.onStarting),k=r.transports._logic.prepareQueryString(a,k),a.log("Negotiating with '"+k+"'."),a._.negotiateRequest=r.transports._logic.ajax(a,{url:k,error:function(n,t){t!==c?g(n,a):y.reject(r._.error(f.stoppedWhileNegotiating,null,a._.negotiateRequest))},success:function(t){var i,e,h,o=[],s=[];try{i=a._parseResponse(t)}catch(c){g(r._.error(f.errorParsingNegotiateResponse,c),a);return}if(e=a._.keepAliveData,a.appRelativeUrl=i.Url,a.id=i.ConnectionId,a.token=i.ConnectionToken,a.webSocketServerUrl=i.WebSocketServerUrl,a._.pollTimeout=i.ConnectionTimeout*1e3+1e4,a.disconnectTimeout=i.DisconnectTimeout*1e3,a._.totalTransportConnectTimeout=a.transportConnectTimeout+i.TransportConnectTimeout*1e3,i.KeepAliveTimeout?(e.activated=!0,e.timeout=i.KeepAliveTimeout*1e3,e.timeoutWarning=e.timeout*a.keepAliveWarnAt,a._.beatInterval=(e.timeout-e.timeoutWarning)/3):e.activated=!1,a.reconnectWindow=a.disconnectTimeout+(e.timeout||0),!i.ProtocolVersion||i.ProtocolVersion!==a.clientProtocol){h=r._.error(r._.format(f.protocolIncompatible,a.clientProtocol,i.ProtocolVersion));n(a).triggerHandler(u.onError,[h]);y.reject(h);return}n.each(r.transports,function(n){if(n.indexOf("_")===0||n==="webSockets"&&!i.TryWebSockets)return!0;s.push(n)});n.isArray(v.transport)?n.each(v.transport,function(t,i){n.inArray(i,s)>=0&&o.push(i)}):v.transport==="auto"?o=s:n.inArray(v.transport,s)>=0&&o.push(v.transport);d(o)}}),y.promise())},starting:function(t){var i=this;return n(i).bind(u.onStarting,function(){t.call(i)}),i},send:function(n){var t=this;if(t.state===r.connectionState.disconnected)throw new Error("SignalR: Connection must be started before data can be sent. Call .start() before .send()");if(t.state===r.connectionState.connecting)throw new Error("SignalR: Connection has not been fully initialized. 
Use .start().done() or .start().fail() to run logic after the connection has started.");return t.transport.send(t,n),t},received:function(t){var i=this;return n(i).bind(u.onReceived,function(n,r){t.call(i,r)}),i},stateChanged:function(t){var i=this;return n(i).bind(u.onStateChanged,function(n,r){t.call(i,r)}),i},error:function(t){var i=this;return n(i).bind(u.onError,function(n,r,u){i.lastError=r;t.call(i,r,u)}),i},disconnected:function(t){var i=this;return n(i).bind(u.onDisconnect,function(){t.call(i)}),i},connectionSlow:function(t){var i=this;return n(i).bind(u.onConnectionSlow,function(){t.call(i)}),i},reconnecting:function(t){var i=this;return n(i).bind(u.onReconnecting,function(){t.call(i)}),i},reconnected:function(t){var i=this;return n(i).bind(u.onReconnect,function(){t.call(i)}),i},stop:function(i,h){var a=this,v=a._deferral;if(a._.deferredStartHandler&&e.unbind("load",a._.deferredStartHandler),delete a._.config,delete a._.deferredStartHandler,!o&&(!a._.config||a._.config.waitForPageLoad===!0)){a.log("Stopping connection prior to negotiate.");v&&v.reject(r._.error(f.stoppedWhileLoading));return}if(a.state!==r.connectionState.disconnected)return a.log("Stopping connection."),t.clearTimeout(a._.beatHandle),t.clearInterval(a._.pingIntervalId),a.transport&&(a.transport.stop(a),h!==!1&&a.transport.abort(a,i),l(a)&&r.transports._logic.stopMonitoringKeepAlive(a),a.transport=null),a._.negotiateRequest&&(a._.negotiateRequest.abort(c),delete a._.negotiateRequest),a._.initHandler&&a._.initHandler.stop(),delete a._deferral,delete a.messageId,delete a.groupsToken,delete a.id,delete a._.pingIntervalId,delete a._.lastMessageAt,delete a._.lastActiveAt,a._.connectingMessageBuffer.clear(),n(a).unbind(u.onStart),s(a,a.state,r.connectionState.disconnected),n(a).triggerHandler(u.onDisconnect),a},log:function(n){v(n,this.logging)}};r.fn.init.prototype=r.fn;r.noConflict=function(){return n.connection===r&&(n.connection=h),r};n.connection&&(h=n.connection);n.connection=n.signalR=r})(window.jQuery,window),function(n,t,i){function s(n){n._.keepAliveData.monitoring&&l(n);u.markActive(n)&&(n._.beatHandle=t.setTimeout(function(){s(n)},n._.beatInterval))}function l(t){var i=t._.keepAliveData,u;t.state===r.connectionState.connected&&(u=(new Date).getTime()-t._.lastMessageAt,u>=i.timeout?(t.log("Keep alive timed out. Notifying transport that connection has been lost."),t.transport.lostConnection(t)):u>=i.timeoutWarning?i.userNotified||(t.log("Keep alive has been missed, connection may be dead/slow."),n(t).triggerHandler(f.onConnectionSlow),i.userNotified=!0):i.userNotified=!1)}function e(n,t){var i=n.url+t;return n.transport&&(i+="?transport="+n.transport.name),u.prepareQueryString(n,i)}function h(n){this.connection=n;this.startRequested=!1;this.startCompleted=!1;this.connectionStopped=!1}var r=n.signalR,f=n.signalR.events,c=n.signalR.changeState,o="__Start Aborted__",u;r.transports={};h.prototype={start:function(n,r,u){var f=this,e=f.connection,o=!1;if(f.startRequested||f.connectionStopped){e.log("WARNING! "+n.name+" transport cannot be started. 
Initialization ongoing or completed.");return}e.log(n.name+" transport starting.");n.start(e,function(){o||f.initReceived(n,r)},function(t){return o||(o=!0,f.transportFailed(n,t,u)),!f.startCompleted||f.connectionStopped});f.transportTimeoutHandle=t.setTimeout(function(){o||(o=!0,e.log(n.name+" transport timed out when trying to connect."),f.transportFailed(n,i,u))},e._.totalTransportConnectTimeout)},stop:function(){this.connectionStopped=!0;t.clearTimeout(this.transportTimeoutHandle);r.transports._logic.tryAbortStartRequest(this.connection)},initReceived:function(n,i){var u=this,f=u.connection;if(u.startRequested){f.log("WARNING! The client received multiple init messages.");return}u.connectionStopped||(u.startRequested=!0,t.clearTimeout(u.transportTimeoutHandle),f.log(n.name+" transport connected. Initiating start request."),r.transports._logic.ajaxStart(f,function(){u.startCompleted=!0;i()}))},transportFailed:function(i,u,e){var o=this.connection,h=o._deferral,s;this.connectionStopped||(t.clearTimeout(this.transportTimeoutHandle),this.startRequested?this.startCompleted||(s=r._.error(r.resources.errorDuringStartRequest,u),o.log(i.name+" transport failed during the start request. Stopping the connection."),n(o).triggerHandler(f.onError,[s]),h&&h.reject(s),o.stop()):(i.stop(o),o.log(i.name+" transport failed to connect. Attempting to fall back."),e()))}};u=r.transports._logic={ajax:function(t,i){return n.ajax(n.extend(!0,{},n.signalR.ajaxDefaults,{type:"GET",data:{},xhrFields:{withCredentials:t.withCredentials},contentType:t.contentType,dataType:t.ajaxDataType},i))},pingServer:function(t){var e,f,i=n.Deferred();return t.transport?(e=t.url+"/ping",e=u.addQs(e,t.qs),f=u.ajax(t,{url:e,success:function(n){var u;try{u=t._parseResponse(n)}catch(e){i.reject(r._.transportError(r.resources.pingServerFailedParse,t.transport,e,f));t.stop();return}u.Response==="pong"?i.resolve():i.reject(r._.transportError(r._.format(r.resources.pingServerFailedInvalidResponse,n),t.transport,null,f))},error:function(n){n.status===401||n.status===403?(i.reject(r._.transportError(r._.format(r.resources.pingServerFailedStatusCode,n.status),t.transport,n,f)),t.stop()):i.reject(r._.transportError(r.resources.pingServerFailed,t.transport,n,f))}})):i.reject(r._.transportError(r.resources.noConnectionTransport,t.transport)),i.promise()},prepareQueryString:function(n,i){var r;return r=u.addQs(i,"clientProtocol="+n.clientProtocol),r=u.addQs(r,n.qs),n.token&&(r+="&connectionToken="+t.encodeURIComponent(n.token)),n.data&&(r+="&connectionData="+t.encodeURIComponent(n.data)),r},addQs:function(t,i){var r=t.indexOf("?")!==-1?"&":"?",u;if(!i)return t;if(typeof i=="object")return t+r+n.param(i);if(typeof i=="string")return u=i.charAt(0),(u==="?"||u==="&")&&(r=""),t+r+i;throw new Error("Query string property must be either a string or object.");},getUrl:function(n,i,r,f,e){var h=i==="webSockets"?"":n.baseUrl,o=h+n.appRelativeUrl,s="transport="+i;return!e&&n.groupsToken&&(s+="&groupsToken="+t.encodeURIComponent(n.groupsToken)),r?(o+=f?"/poll":"/reconnect",!e&&n.messageId&&(s+="&messageId="+t.encodeURIComponent(n.messageId))):o+="/connect",o+="?"+s,o=u.prepareQueryString(n,o),e||(o+="&tid="+Math.floor(Math.random()*11)),o},maximizePersistentResponse:function(n){return{MessageId:n.C,Messages:n.M,Initialized:typeof n.S!="undefined"?!0:!1,ShouldReconnect:typeof n.T!="undefined"?!0:!1,LongPollDelay:n.L,GroupsToken:n.G}},updateGroups:function(n,t){t&&(n.groupsToken=t)},stringifySend:function(n,t){return typeof t=="string"||typeof 
t=="undefined"||t===null?t:n.json.stringify(t)},ajaxSend:function(t,i){var h=u.stringifySend(t,i),c=e(t,"/send"),o,s=function(t,u){n(u).triggerHandler(f.onError,[r._.transportError(r.resources.sendFailed,u.transport,t,o),i])};return o=u.ajax(t,{url:c,type:t.ajaxDataType==="jsonp"?"GET":"POST",contentType:r._.defaultContentType,data:{data:h},success:function(n){var i;if(n){try{i=t._parseResponse(n)}catch(r){s(r,t);t.stop();return}u.triggerReceived(t,i)}},error:function(n,i){i!=="abort"&&i!=="parsererror"&&s(n,t)}})},ajaxAbort:function(n,t){if(typeof n.transport!="undefined"){t=typeof t=="undefined"?!0:t;var i=e(n,"/abort");u.ajax(n,{url:i,async:t,timeout:1e3,type:"POST"});n.log("Fired ajax abort async = "+t+".")}},ajaxStart:function(t,i){var h=function(n){var i=t._deferral;i&&i.reject(n)},s=function(i){t.log("The start request failed. Stopping the connection.");n(t).triggerHandler(f.onError,[i]);h(i);t.stop()};t._.startRequest=u.ajax(t,{url:e(t,"/start"),success:function(n,u,f){var e;try{e=t._parseResponse(n)}catch(o){s(r._.error(r._.format(r.resources.errorParsingStartResponse,n),o,f));return}e.Response==="started"?i():s(r._.error(r._.format(r.resources.invalidStartResponse,n),null,f))},error:function(n,i,u){i!==o?s(r._.error(r.resources.errorDuringStartRequest,u,n)):(t.log("The start request aborted because connection.stop() was called."),h(r._.error(r.resources.stoppedDuringStartRequest,null,n)))}})},tryAbortStartRequest:function(n){n._.startRequest&&(n._.startRequest.abort(o),delete n._.startRequest)},tryInitialize:function(n,t,i){t.Initialized&&i?i():t.Initialized&&n.log("WARNING! The client received an init message after reconnecting.")},triggerReceived:function(t,i){t._.connectingMessageBuffer.tryBuffer(i)||n(t).triggerHandler(f.onReceived,[i])},processMessages:function(t,i,r){var f;u.markLastMessage(t);i&&(f=u.maximizePersistentResponse(i),u.updateGroups(t,f.GroupsToken),f.MessageId&&(t.messageId=f.MessageId),f.Messages&&(n.each(f.Messages,function(n,i){u.triggerReceived(t,i)}),u.tryInitialize(t,f,r)))},monitorKeepAlive:function(t){var i=t._.keepAliveData;i.monitoring?t.log("Tried to monitor keep alive but it's already being monitored."):(i.monitoring=!0,u.markLastMessage(t),t._.keepAliveData.reconnectKeepAliveUpdate=function(){u.markLastMessage(t)},n(t).bind(f.onReconnect,t._.keepAliveData.reconnectKeepAliveUpdate),t.log("Now monitoring keep alive with a warning timeout of "+i.timeoutWarning+", keep alive timeout of "+i.timeout+" and disconnecting timeout of "+t.disconnectTimeout))},stopMonitoringKeepAlive:function(t){var i=t._.keepAliveData;i.monitoring&&(i.monitoring=!1,n(t).unbind(f.onReconnect,t._.keepAliveData.reconnectKeepAliveUpdate),t._.keepAliveData={},t.log("Stopping the monitoring of the keep alive."))},startHeartbeat:function(n){n._.lastActiveAt=(new Date).getTime();s(n)},markLastMessage:function(n){n._.lastMessageAt=(new Date).getTime()},markActive:function(n){return u.verifyLastActive(n)?(n._.lastActiveAt=(new Date).getTime(),!0):!1},isConnectedOrReconnecting:function(n){return n.state===r.connectionState.connected||n.state===r.connectionState.reconnecting},ensureReconnectingState:function(t){return c(t,r.connectionState.connected,r.connectionState.reconnecting)===!0&&n(t).triggerHandler(f.onReconnecting),t.state===r.connectionState.reconnecting},clearReconnectTimeout:function(n){n&&n._.reconnectTimeout&&(t.clearTimeout(n._.reconnectTimeout),delete n._.reconnectTimeout)},verifyLastActive:function(t){if((new Date).getTime()-t._.lastActiveAt>=t.reconnectWindow){var 
i=r._.format(r.resources.reconnectWindowTimeout,new Date(t._.lastActiveAt),t.reconnectWindow);return t.log(i),n(t).triggerHandler(f.onError,[r._.error(i,"TimeoutException")]),t.stop(!1,!1),!1}return!0},reconnect:function(n,i){var f=r.transports[i];if(u.isConnectedOrReconnecting(n)&&!n._.reconnectTimeout){if(!u.verifyLastActive(n))return;n._.reconnectTimeout=t.setTimeout(function(){u.verifyLastActive(n)&&(f.stop(n),u.ensureReconnectingState(n)&&(n.log(i+" reconnecting."),f.start(n)))},n.reconnectDelay)}},handleParseFailure:function(t,i,u,e,o){var s=r._.transportError(r._.format(r.resources.parseFailed,i),t.transport,u,o);e&&e(s)?t.log("Failed to parse server response while attempting to connect."):(n(t).triggerHandler(f.onError,[s]),t.stop())},initHandler:function(n){return new h(n)},foreverFrame:{count:0,connections:{}}}}(window.jQuery,window),function(n,t){var r=n.signalR,u=n.signalR.events,f=n.signalR.changeState,i=r.transports._logic;r.transports.webSockets={name:"webSockets",supportsKeepAlive:function(){return!0},send:function(t,f){var e=i.stringifySend(t,f);try{t.socket.send(e)}catch(o){n(t).triggerHandler(u.onError,[r._.transportError(r.resources.webSocketsInvalidState,t.transport,o,t.socket),f])}},start:function(e,o,s){var h,c=!1,l=this,a=!o,v=n(e);if(!t.WebSocket){s();return}e.socket||(h=e.webSocketServerUrl?e.webSocketServerUrl:e.wsProtocol+e.host,h+=i.getUrl(e,this.name,a),e.log("Connecting to websocket endpoint '"+h+"'."),e.socket=new t.WebSocket(h),e.socket.onopen=function(){c=!0;e.log("Websocket opened.");i.clearReconnectTimeout(e);f(e,r.connectionState.reconnecting,r.connectionState.connected)===!0&&v.triggerHandler(u.onReconnect)},e.socket.onclose=function(t){var i;this===e.socket&&(c&&typeof t.wasClean!="undefined"&&t.wasClean===!1?(i=r._.transportError(r.resources.webSocketClosed,e.transport,t),e.log("Unclean disconnect from websocket: "+(t.reason||"[no reason given]."))):e.log("Websocket closed."),s&&s(i)||(i&&n(e).triggerHandler(u.onError,[i]),l.reconnect(e)))},e.socket.onmessage=function(t){var r;try{r=e._parseResponse(t.data)}catch(u){i.handleParseFailure(e,t.data,u,s,t);return}r&&(n.isEmptyObject(r)||r.M?i.processMessages(e,r,o):i.triggerReceived(e,r))})},reconnect:function(n){i.reconnect(n,this.name)},lostConnection:function(n){this.reconnect(n)},stop:function(n){i.clearReconnectTimeout(n);n.socket&&(n.log("Closing the Websocket."),n.socket.close(),n.socket=null)},abort:function(n,t){i.ajaxAbort(n,t)}}}(window.jQuery,window),function(n,t){var i=n.signalR,u=n.signalR.events,e=n.signalR.changeState,r=i.transports._logic,f=function(n){t.clearTimeout(n._.reconnectAttemptTimeoutHandle);delete n._.reconnectAttemptTimeoutHandle};i.transports.serverSentEvents={name:"serverSentEvents",supportsKeepAlive:function(){return!0},timeOut:3e3,start:function(o,s,h){var c=this,l=!1,a=n(o),v=!s,y;if(o.eventSource&&(o.log("The connection already has an event source. 
Stopping it."),o.stop()),!t.EventSource){h&&(o.log("This browser doesn't support SSE."),h());return}y=r.getUrl(o,this.name,v);try{o.log("Attempting to connect to SSE endpoint '"+y+"'.");o.eventSource=new t.EventSource(y,{withCredentials:o.withCredentials})}catch(p){o.log("EventSource failed trying to connect with error "+p.Message+".");h?h():(a.triggerHandler(u.onError,[i._.transportError(i.resources.eventSourceFailedToConnect,o.transport,p)]),v&&c.reconnect(o));return}v&&(o._.reconnectAttemptTimeoutHandle=t.setTimeout(function(){l===!1&&o.eventSource.readyState!==t.EventSource.OPEN&&c.reconnect(o)},c.timeOut));o.eventSource.addEventListener("open",function(){o.log("EventSource connected.");f(o);r.clearReconnectTimeout(o);l===!1&&(l=!0,e(o,i.connectionState.reconnecting,i.connectionState.connected)===!0&&a.triggerHandler(u.onReconnect))},!1);o.eventSource.addEventListener("message",function(n){var t;if(n.data!=="initialized"){try{t=o._parseResponse(n.data)}catch(i){r.handleParseFailure(o,n.data,i,h,n);return}r.processMessages(o,t,s)}},!1);o.eventSource.addEventListener("error",function(n){var r=i._.transportError(i.resources.eventSourceError,o.transport,n);this===o.eventSource&&(h&&h(r)||(o.log("EventSource readyState: "+o.eventSource.readyState+"."),n.eventPhase===t.EventSource.CLOSED?(o.log("EventSource reconnecting due to the server connection ending."),c.reconnect(o)):(o.log("EventSource error."),a.triggerHandler(u.onError,[r]))))},!1)},reconnect:function(n){r.reconnect(n,this.name)},lostConnection:function(n){this.reconnect(n)},send:function(n,t){r.ajaxSend(n,t)},stop:function(n){f(n);r.clearReconnectTimeout(n);n&&n.eventSource&&(n.log("EventSource calling close()."),n.eventSource.close(),n.eventSource=null,delete n.eventSource)},abort:function(n,t){r.ajaxAbort(n,t)}}}(window.jQuery,window),function(n,t){var r=n.signalR,e=n.signalR.events,o=n.signalR.changeState,i=r.transports._logic,u=function(){var n=t.document.createElement("iframe");return n.setAttribute("style","position:absolute;top:0;left:0;width:0;height:0;visibility:hidden;"),n},f=function(){var i=null,f=1e3,n=0;return{prevent:function(){r._.ieVersion<=8&&(n===0&&(i=t.setInterval(function(){var n=u();t.document.body.appendChild(n);t.document.body.removeChild(n);n=null},f)),n++)},cancel:function(){n===1&&t.clearInterval(i);n>0&&n--}}}();r.transports.foreverFrame={name:"foreverFrame",supportsKeepAlive:function(){return!0},iframeClearThreshold:50,start:function(n,r,e){var l=this,s=i.foreverFrame.count+=1,h,o=u(),c=function(){n.log("Forever frame iframe finished loading and is no longer receiving messages.");e&&e()||l.reconnect(n)};if(t.EventSource){e&&(n.log("Forever Frame is not supported by SignalR on browsers with SSE support."),e());return}o.setAttribute("data-signalr-connection-id",n.id);f.prevent();h=i.getUrl(n,this.name);h+="&frameId="+s;t.document.documentElement.appendChild(o);n.log("Binding to iframe's load event.");o.addEventListener?o.addEventListener("load",c,!1):o.attachEvent&&o.attachEvent("onload",c);o.src=h;i.foreverFrame.connections[s]=n;n.frame=o;n.frameId=s;r&&(n.onSuccess=function(){n.log("Iframe transport started.");r()})},reconnect:function(n){var r=this;i.isConnectedOrReconnecting(n)&&i.verifyLastActive(n)&&t.setTimeout(function(){if(i.verifyLastActive(n)&&n.frame&&i.ensureReconnectingState(n)){var u=n.frame,t=i.getUrl(n,r.name,!0)+"&frameId="+n.frameId;n.log("Updating iframe src to 
'"+t+"'.");u.src=t}},n.reconnectDelay)},lostConnection:function(n){this.reconnect(n)},send:function(n,t){i.ajaxSend(n,t)},receive:function(t,u){var f,e,o;if(t.json!==t._originalJson&&(u=t._originalJson.stringify(u)),o=t._parseResponse(u),i.processMessages(t,o,t.onSuccess),t.state===n.signalR.connectionState.connected&&(t.frameMessageCount=(t.frameMessageCount||0)+1,t.frameMessageCount>r.transports.foreverFrame.iframeClearThreshold&&(t.frameMessageCount=0,f=t.frame.contentWindow||t.frame.contentDocument,f&&f.document&&f.document.body)))for(e=f.document.body;e.firstChild;)e.removeChild(e.firstChild)},stop:function(n){var r=null;if(f.cancel(),n.frame){if(n.frame.stop)n.frame.stop();else try{r=n.frame.contentWindow||n.frame.contentDocument;r.document&&r.document.execCommand&&r.document.execCommand("Stop")}catch(u){n.log("Error occurred when stopping foreverFrame transport. Message = "+u.message+".")}n.frame.parentNode===t.document.documentElement&&t.document.documentElement.removeChild(n.frame);delete i.foreverFrame.connections[n.frameId];n.frame=null;n.frameId=null;delete n.frame;delete n.frameId;delete n.onSuccess;delete n.frameMessageCount;n.log("Stopping forever frame.")}},abort:function(n,t){i.ajaxAbort(n,t)},getConnection:function(n){return i.foreverFrame.connections[n]},started:function(t){o(t,r.connectionState.reconnecting,r.connectionState.connected)===!0&&n(t).triggerHandler(e.onReconnect)}}}(window.jQuery,window),function(n,t){var r=n.signalR,u=n.signalR.events,e=n.signalR.changeState,f=n.signalR.isDisconnecting,i=r.transports._logic;r.transports.longPolling={name:"longPolling",supportsKeepAlive:function(){return!1},reconnectDelay:3e3,start:function(o,s,h){var a=this,v=function(){v=n.noop;o.log("LongPolling connected.");s?s():o.log("WARNING! The client received an init message after reconnecting.")},y=function(n){return h(n)?(o.log("LongPolling failed to connect."),!0):!1},c=o._,l=0,p=function(i){t.clearTimeout(c.reconnectTimeoutId);c.reconnectTimeoutId=null;e(i,r.connectionState.reconnecting,r.connectionState.connected)===!0&&(i.log("Raising the reconnect event"),n(i).triggerHandler(u.onReconnect))},w=36e5;o.pollXhr&&(o.log("Polling xhr requests already exists, aborting."),o.stop());o.messageId=null;c.reconnectTimeoutId=null;c.pollTimeoutId=t.setTimeout(function(){(function e(s,h){var g=s.messageId,nt=g===null,k=!nt,tt=!h,d=i.getUrl(s,a.name,k,tt,!0),b={};(s.messageId&&(b.messageId=s.messageId),s.groupsToken&&(b.groupsToken=s.groupsToken),f(s)!==!0)&&(o.log("Opening long polling request to '"+d+"'."),s.pollXhr=i.ajax(o,{xhrFields:{onprogress:function(){i.markLastMessage(o)}},url:d,type:"POST",contentType:r._.defaultContentType,data:b,timeout:o._.pollTimeout,success:function(r){var h,w=0,u,a;o.log("Long poll complete.");l=0;try{h=o._parseResponse(r)}catch(b){i.handleParseFailure(s,r,b,y,s.pollXhr);return}(c.reconnectTimeoutId!==null&&p(s),h&&(u=i.maximizePersistentResponse(h)),i.processMessages(s,h,v),u&&n.type(u.LongPollDelay)==="number"&&(w=u.LongPollDelay),f(s)!==!0)&&(a=u&&u.ShouldReconnect,!a||i.ensureReconnectingState(s))&&(w>0?c.pollTimeoutId=t.setTimeout(function(){e(s,a)},w):e(s,a))},error:function(f,h){var v=r._.transportError(r.resources.longPollFailed,o.transport,f,s.pollXhr);if(t.clearTimeout(c.reconnectTimeoutId),c.reconnectTimeoutId=null,h==="abort"){o.log("Aborted xhr request.");return}if(!y(v)){if(l++,o.state!==r.connectionState.reconnecting&&(o.log("An error occurred using longPolling. Status = "+h+". 
Response = "+f.responseText+"."),n(s).triggerHandler(u.onError,[v])),(o.state===r.connectionState.connected||o.state===r.connectionState.reconnecting)&&!i.verifyLastActive(o))return;if(!i.ensureReconnectingState(s))return;c.pollTimeoutId=t.setTimeout(function(){e(s,!0)},a.reconnectDelay)}}}),k&&h===!0&&(c.reconnectTimeoutId=t.setTimeout(function(){p(s)},Math.min(1e3*(Math.pow(2,l)-1),w))))})(o)},250)},lostConnection:function(n){n.pollXhr&&n.pollXhr.abort("lostConnection")},send:function(n,t){i.ajaxSend(n,t)},stop:function(n){t.clearTimeout(n._.pollTimeoutId);t.clearTimeout(n._.reconnectTimeoutId);delete n._.pollTimeoutId;delete n._.reconnectTimeoutId;n.pollXhr&&(n.pollXhr.abort(),n.pollXhr=null,delete n.pollXhr)},abort:function(n,t){i.ajaxAbort(n,t)}}}(window.jQuery,window),function(n){function r(n){return n+e}function s(n,t,i){for(var f=n.length,u=[],r=0;r<f;r+=1)n.hasOwnProperty(r)&&(u[r]=t.call(i,n[r],r,n));return u}function h(t){return n.isFunction(t)?null:n.type(t)==="undefined"?null:t}function u(n){for(var t in n)if(n.hasOwnProperty(t))return!0;return!1}function f(n,t){var i=n._.invocationCallbacks,r,f;u(i)&&n.log("Clearing hub invocation callbacks with error: "+t+".");n._.invocationCallbackId=0;delete n._.invocationCallbacks;n._.invocationCallbacks={};for(f in i)r=i[f],r.method.call(r.scope,{E:t})}function i(n,t){return new i.fn.init(n,t)}function t(i,r){var u={qs:null,logging:!1,useDefaultPath:!0};return n.extend(u,r),(!i||u.useDefaultPath)&&(i=(i||"")+"/signalr"),new t.fn.init(i,u)}var e=".hubProxy",o=n.signalR;i.fn=i.prototype={init:function(n,t){this.state={};this.connection=n;this.hubName=t;this._={callbackMap:{}}},constructor:i,hasSubscriptions:function(){return u(this._.callbackMap)},on:function(t,i){var u=this,f=u._.callbackMap;return t=t.toLowerCase(),f[t]||(f[t]={}),f[t][i]=function(n,t){i.apply(u,t)},n(u).bind(r(t),f[t][i]),u},off:function(t,i){var e=this,o=e._.callbackMap,f;return t=t.toLowerCase(),f=o[t],f&&(f[i]?(n(e).unbind(r(t),f[i]),delete f[i],u(f)||delete o[t]):i||(n(e).unbind(r(t)),delete o[t])),e},invoke:function(t){var i=this,r=i.connection,e=n.makeArray(arguments).slice(1),c=s(e,h),f={H:i.hubName,M:t,A:c,I:r._.invocationCallbackId},u=n.Deferred(),l=function(f){var e=i._maximizeHubResponse(f),h,s;n.extend(i.state,e.State);e.Progress?u.notifyWith?u.notifyWith(i,[e.Progress.Data]):r._.progressjQueryVersionLogged||(r.log("A hub method invocation progress update was received but the version of jQuery in use ("+n.prototype.jquery+") does not support progress updates. Upgrade to jQuery 1.7+ to receive progress notifications."),r._.progressjQueryVersionLogged=!0):e.Error?(e.StackTrace&&r.log(e.Error+"\n"+e.StackTrace+"."),h=e.IsHubException?"HubException":"Exception",s=o._.error(e.Error,h),s.data=e.ErrorData,r.log(i.hubName+"."+t+" failed to execute. 
Error: "+s.message),u.rejectWith(i,[s])):(r.log("Invoked "+i.hubName+"."+t),u.resolveWith(i,[e.Result]))};return r._.invocationCallbacks[r._.invocationCallbackId.toString()]={scope:i,method:l},r._.invocationCallbackId+=1,n.isEmptyObject(i.state)||(f.S=i.state),r.log("Invoking "+i.hubName+"."+t),r.send(f),u.promise()},_maximizeHubResponse:function(n){return{State:n.S,Result:n.R,Progress:n.P?{Id:n.P.I,Data:n.P.D}:null,Id:n.I,IsHubException:n.H,Error:n.E,StackTrace:n.T,ErrorData:n.D}}};i.fn.init.prototype=i.fn;t.fn=t.prototype=n.connection();t.fn.init=function(t,i){var e={qs:null,logging:!1,useDefaultPath:!0},u=this;n.extend(e,i);n.signalR.fn.init.call(u,t,e.qs,e.logging);u.proxies={};u._.invocationCallbackId=0;u._.invocationCallbacks={};u.received(function(t){var f,o,e,i,s,h;t&&(typeof t.P!="undefined"?(e=t.P.I.toString(),i=u._.invocationCallbacks[e],i&&i.method.call(i.scope,t)):typeof t.I!="undefined"?(e=t.I.toString(),i=u._.invocationCallbacks[e],i&&(u._.invocationCallbacks[e]=null,delete u._.invocationCallbacks[e],i.method.call(i.scope,t))):(f=this._maximizeClientHubInvocation(t),u.log("Triggering client hub event '"+f.Method+"' on hub '"+f.Hub+"'."),s=f.Hub.toLowerCase(),h=f.Method.toLowerCase(),o=this.proxies[s],n.extend(o.state,f.State),n(o).triggerHandler(r(h),[f.Args])))});u.error(function(n,t){var i,r;t&&(i=t.I,r=u._.invocationCallbacks[i],r&&(u._.invocationCallbacks[i]=null,delete u._.invocationCallbacks[i],r.method.call(r.scope,{E:n})))});u.reconnecting(function(){u.transport&&u.transport.name==="webSockets"&&f(u,"Connection started reconnecting before invocation result was received.")});u.disconnected(function(){f(u,"Connection was disconnected before invocation result was received.")})};t.fn._maximizeClientHubInvocation=function(n){return{Hub:n.H,Method:n.M,Args:n.A,State:n.S}};t.fn._registerSubscribedHubs=function(){var t=this;t._subscribedToHubs||(t._subscribedToHubs=!0,t.starting(function(){var i=[];n.each(t.proxies,function(n){this.hasSubscriptions()&&(i.push({name:n}),t.log("Client subscribed to hub '"+n+"'."))});i.length===0&&t.log("No hubs have been subscribed to. The client will not receive data from hubs. To fix, declare at least one client side function prior to connection start for each hub you wish to subscribe to.");t.data=t.json.stringify(i)}))};t.fn.createHubProxy=function(n){n=n.toLowerCase();var t=this.proxies[n];return t||(t=i(this,n),this.proxies[n]=t),this._registerSubscribedHubs(),t};t.fn.init.prototype=t.fn;n.hubConnection=t}(window.jQuery,window),function(n){n.signalR.version="2.2.2"}(window.jQuery);
// SIG // M18yMi5jcmwwXgYIKwYBBQUHAQEEUjBQME4GCCsGAQUF
// SIG // BzAChkJodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtp
// SIG // L2NlcnRzL01pY1Jvb0NlckF1dDIwMTFfMjAxMV8wM18y
// SIG // Mi5jcnQwgZ8GA1UdIASBlzCBlDCBkQYJKwYBBAGCNy4D
// SIG // MIGDMD8GCCsGAQUFBwIBFjNodHRwOi8vd3d3Lm1pY3Jv
// SIG // c29mdC5jb20vcGtpb3BzL2RvY3MvcHJpbWFyeWNwcy5o
// SIG // dG0wQAYIKwYBBQUHAgIwNB4yIB0ATABlAGcAYQBsAF8A
// SIG // cABvAGwAaQBjAHkAXwBzAHQAYQB0AGUAbQBlAG4AdAAu
// SIG // IB0wDQYJKoZIhvcNAQELBQADggIBAGfyhqWY4FR5Gi7T
// SIG // 2HRnIpsLlhHhY5KZQpZ90nkMkMFlXy4sPvjDctFtg/6+
// SIG // P+gKyju/R6mj82nbY78iNaWXXWWEkH2LRlBV2AySfNIa
// SIG // SxzzPEKLUtCw/WvjPgcuKZvmPRul1LUdd5Q54ulkyUQ9
// SIG // eHoj8xN9ppB0g430yyYCRirCihC7pKkFDJvtaPpoLpWg
// SIG // Kj8qa1hJYx8JaW5amJbkg/TAj/NGK978O9C9Ne9uJa7l
// SIG // ryft0N3zDq+ZKJeYTQ49C/IIidYfwzIY4vDFLc5bnrRJ
// SIG // OQrGCsLGra7lstnbFYhRRVg4MnEnGn+x9Cf43iw6IGmY
// SIG // slmJaG5vp7d0w0AFBqYBKig+gj8TTWYLwLNN9eGPfxxv
// SIG // FX1Fp3blQCplo8NdUmKGwx1jNpeG39rz+PIWoZon4c2l
// SIG // l9DuXWNB41sHnIc+BncG0QaxdR8UvmFhtfDcxhsEvt9B
// SIG // xw4o7t5lL+yX9qFcltgA1qFGvVnzl6UJS0gQmYAf0AAp
// SIG // xbGbpT9Fdx41xtKiop96eiL6SJUfq/tHI4D1nvi/a7dL
// SIG // l+LrdXga7Oo3mXkYS//WsyNodeav+vyL6wuA6mk7r/ww
// SIG // 7QRMjt/fdW1jkT3RnVZOT7+AVyKheBEyIXrvQQqxP/uo
// SIG // zKRdwaGIm1dxVk5IRcBCyZt2WwqASGv9eZ/BvW1taslS
// SIG // cxMNelDNMYIEqjCCBKYCAQEwgZUwfjELMAkGA1UEBhMC
// SIG // VVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcT
// SIG // B1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jw
// SIG // b3JhdGlvbjEoMCYGA1UEAxMfTWljcm9zb2Z0IENvZGUg
// SIG // U2lnbmluZyBQQ0EgMjAxMQITMwAAAMMOm6fYstz3LAAA
// SIG // AAAAwzAJBgUrDgMCGgUAoIG+MBkGCSqGSIb3DQEJAzEM
// SIG // BgorBgEEAYI3AgEEMBwGCisGAQQBgjcCAQsxDjAMBgor
// SIG // BgEEAYI3AgEVMCMGCSqGSIb3DQEJBDEWBBQ0iwf4s4ft
// SIG // Uhl8pxFKf5VHI8ZHTzBeBgorBgEEAYI3AgEMMVAwTqAm
// SIG // gCQATQBpAGMAcgBvAHMAbwBmAHQAIABMAGUAYQByAG4A
// SIG // aQBuAGehJIAiaHR0cDovL3d3dy5taWNyb3NvZnQuY29t
// SIG // L2xlYXJuaW5nIDANBgkqhkiG9w0BAQEFAASCAQC4BOVq
// SIG // nLDZQW2rQaLCLphdoBtHA4FehuG255BBJSSw0YQvAOma
// SIG // uQcGLMij08Bv9tMGDlvlPBtKZ6hZuzlcGek3S+50zMl6
// SIG // 67cRqCG4xS9egWBdPv4xHMaDOlp/6+b8fPoVIanxYh7p
// SIG // VuoZuqG9b0gmCKccex8tI6HvWfxHcZ/bM6T0bmCwjsUk
// SIG // d5atKNd0zQMFhOrWkb6eK1f8ZUhALD68hTYWCfcMIGKO
// SIG // knNB2hLEujQr48gqYduhBG2rDuuXUW2rU3aXfIvxdOm6
// SIG // 78d86H1oxoArbimSO+Mve8/zR9/e05qMpCOx8wAuPqFc
// SIG // IjdMsUAomBmrg1UT6yDfi+jf1i1ooYICKDCCAiQGCSqG
// SIG // SIb3DQEJBjGCAhUwggIRAgEBMIGOMHcxCzAJBgNVBAYT
// SIG // AlVTMRMwEQYDVQQIEwpXYXNoaW5ndG9uMRAwDgYDVQQH
// SIG // EwdSZWRtb25kMR4wHAYDVQQKExVNaWNyb3NvZnQgQ29y
// SIG // cG9yYXRpb24xITAfBgNVBAMTGE1pY3Jvc29mdCBUaW1l
// SIG // LVN0YW1wIFBDQQITMwAAAMDeLD0HlORJeQAAAAAAwDAJ
// SIG // BgUrDgMCGgUAoF0wGAYJKoZIhvcNAQkDMQsGCSqGSIb3
// SIG // DQEHATAcBgkqhkiG9w0BCQUxDxcNMTcxMjI3MDc0NzM5
// SIG // WjAjBgkqhkiG9w0BCQQxFgQUXr1B65kJvxCBxQAVXOdj
// SIG // 5qhU9AEwDQYJKoZIhvcNAQEFBQAEggEADxShk/WfQZ7i
// SIG // M+J9fi3DU4WYZcu4Cdn2Mcn/1FYZF6h0ma1TlKN6tm+U
// SIG // gabPSj34yRpz2PFBcwE8bNHMUD+BcMeKclh0P6hp2QSL
// SIG // DmjMaYurZISISLpI1MQG/j9FPHwN9iFcaGXBQFTT8R4z
// SIG // kH39J0AhyHfQMN22pJXM90ZCmApaE0ewbD7+pGbPSY1g
// SIG // oYU+BE/k30Gu6czDeRUA/2cTKp2y7aql5o+E2zJfmqdC
// SIG // ynNPVM6A13CzU1cBVI9LLyNQ9WTotcf5HlNo8VxliqGU
// SIG // HQA1k0dyja4KdGtO0jgvrj/5bC/SvWIPT44rqkfMmb5a
// SIG // j4kpN+WSiDWkKTZWatjLig==
// SIG // End signature block
| b |
client.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[derive(Debug)]
pub(crate) struct Handle<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
client: smithy_client::Client<C, M, R>,
conf: crate::Config,
}
/// An ergonomic service client for `Parrot_v1`.
///
/// This client allows ergonomic access to a `Parrot_v1`-shaped service.
/// Each method corresponds to an endpoint defined in the service's Smithy model,
/// and the request and response shapes are auto-generated from that same model.
///
/// # Using a Client
///
/// Once you have a client set up, you can access the service's endpoints
/// by calling the appropriate method on [`Client`]. Each such method
/// returns a request builder for that endpoint, with methods for setting
/// the various fields of the request. Once your request is complete, use
/// the `send` method to send the request. `send` returns a future, which
/// you then have to `.await` to get the service's response.
///
/// [builder pattern]: https://rust-lang.github.io/api-guidelines/type-safety.html#c-builder
/// [SigV4-signed requests]: https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
#[derive(std::fmt::Debug)]
pub struct Client<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use smithy_client::Builder;
impl<C, M, R> From<smithy_client::Client<C, M, R>> for Client<C, M, R> {
fn from(client: smithy_client::Client<C, M, R>) -> Self {
Self::with_config(client, crate::Config::builder().build())
}
}
impl<C, M, R> Client<C, M, R> {
pub fn with_config(client: smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
pub fn conf(&self) -> &crate::Config {
&self.handle.conf
}
}
impl<C, M, R> Client<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub fn delete_lexicon(&self) -> fluent_builders::DeleteLexicon<C, M, R> {
fluent_builders::DeleteLexicon::new(self.handle.clone())
}
pub fn describe_voices(&self) -> fluent_builders::DescribeVoices<C, M, R> {
fluent_builders::DescribeVoices::new(self.handle.clone())
}
pub fn get_lexicon(&self) -> fluent_builders::GetLexicon<C, M, R> {
fluent_builders::GetLexicon::new(self.handle.clone())
}
pub fn get_speech_synthesis_task(&self) -> fluent_builders::GetSpeechSynthesisTask<C, M, R> {
fluent_builders::GetSpeechSynthesisTask::new(self.handle.clone())
}
pub fn list_lexicons(&self) -> fluent_builders::ListLexicons<C, M, R> {
fluent_builders::ListLexicons::new(self.handle.clone())
}
pub fn list_speech_synthesis_tasks(
&self,
) -> fluent_builders::ListSpeechSynthesisTasks<C, M, R> {
fluent_builders::ListSpeechSynthesisTasks::new(self.handle.clone())
}
pub fn put_lexicon(&self) -> fluent_builders::PutLexicon<C, M, R> {
fluent_builders::PutLexicon::new(self.handle.clone())
}
pub fn start_speech_synthesis_task(
&self,
) -> fluent_builders::StartSpeechSynthesisTask<C, M, R> {
fluent_builders::StartSpeechSynthesisTask::new(self.handle.clone())
}
pub fn synthesize_speech(&self) -> fluent_builders::SynthesizeSpeech<C, M, R> {
fluent_builders::SynthesizeSpeech::new(self.handle.clone())
}
}
pub mod fluent_builders {
#[derive(std::fmt::Debug)]
pub struct DeleteLexicon<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::delete_lexicon_input::Builder,
}
impl<C, M, R> DeleteLexicon<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DeleteLexiconOutput,
smithy_http::result::SdkError<crate::error::DeleteLexiconError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::DeleteLexiconInputOperationOutputAlias,
crate::output::DeleteLexiconOutput,
crate::error::DeleteLexiconError,
crate::input::DeleteLexiconInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The name of the lexicon to delete. Must be an existing lexicon in
/// the region.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct DescribeVoices<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::describe_voices_input::Builder,
}
impl<C, M, R> DescribeVoices<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::DescribeVoicesOutput,
smithy_http::result::SdkError<crate::error::DescribeVoicesError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::DescribeVoicesInputOperationOutputAlias,
crate::output::DescribeVoicesOutput,
crate::error::DescribeVoicesError,
crate::input::DescribeVoicesInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Specifies the engine (<code>standard</code> or <code>neural</code>)
/// used by Amazon Polly when processing input text for speech synthesis. </p>
pub fn engine(mut self, inp: crate::model::Engine) -> Self {
self.inner = self.inner.engine(inp);
self
}
pub fn set_engine(mut self, input: std::option::Option<crate::model::Engine>) -> Self {
self.inner = self.inner.set_engine(input);
self
}
/// <p> The language identification tag (ISO 639 code for the language
/// name-ISO 3166 country code) for filtering the list of voices returned. If
/// you don't specify this optional parameter, all available voices are
/// returned. </p>
pub fn language_code(mut self, inp: crate::model::LanguageCode) -> Self {
self.inner = self.inner.language_code(inp);
self
}
pub fn set_language_code(
mut self,
input: std::option::Option<crate::model::LanguageCode>,
) -> Self {
self.inner = self.inner.set_language_code(input);
self
}
/// <p>Boolean value indicating whether to return any bilingual voices that
/// use the specified language as an additional language. For instance, if you
    /// request all languages that use US English (en-US), and there is an Italian
/// voice that speaks both Italian (it-IT) and US English, that voice will be
/// included if you specify <code>yes</code> but not if you specify
/// <code>no</code>.</p>
pub fn include_additional_language_codes(mut self, inp: bool) -> Self {
self.inner = self.inner.include_additional_language_codes(inp);
self
}
pub fn set_include_additional_language_codes(
mut self,
input: std::option::Option<bool>,
) -> Self {
self.inner = self.inner.set_include_additional_language_codes(input);
self
}
/// <p>An opaque pagination token returned from the previous
/// <code>DescribeVoices</code> operation. If present, this indicates where
/// to continue the listing.</p>
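        ///
        /// A minimal pagination sketch (assuming the crate is imported as
        /// `polly` and that the output exposes its `next_token` as a public
        /// field, as in this generated code; error handling elided):
        ///
        /// ```no_run
        /// # async fn list_all(client: polly::Client) -> Result<(), Box<dyn std::error::Error>> {
        /// let mut token: Option<String> = None;
        /// loop {
        ///     let mut req = client.describe_voices();
        ///     if let Some(t) = &token {
        ///         req = req.next_token(t.clone());
        ///     }
        ///     let output = req.send().await?;
        ///     // ... consume the returned voices here ...
        ///     token = output.next_token;
        ///     if token.is_none() {
        ///         break;
        ///     }
        /// }
        /// # Ok(())
        /// # }
        /// ```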
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetLexicon<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_lexicon_input::Builder,
}
impl<C, M, R> GetLexicon<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetLexiconOutput,
smithy_http::result::SdkError<crate::error::GetLexiconError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetLexiconInputOperationOutputAlias,
crate::output::GetLexiconOutput,
crate::error::GetLexiconError,
crate::input::GetLexiconInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Name of the lexicon.</p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct GetSpeechSynthesisTask<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_speech_synthesis_task_input::Builder,
}
impl<C, M, R> GetSpeechSynthesisTask<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetSpeechSynthesisTaskOutput,
smithy_http::result::SdkError<crate::error::GetSpeechSynthesisTaskError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetSpeechSynthesisTaskInputOperationOutputAlias,
crate::output::GetSpeechSynthesisTaskOutput,
crate::error::GetSpeechSynthesisTaskError,
crate::input::GetSpeechSynthesisTaskInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>The Amazon Polly generated identifier for a speech synthesis task.</p>
pub fn task_id(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.task_id(inp);
self
}
pub fn set_task_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_task_id(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListLexicons<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_lexicons_input::Builder,
}
impl<C, M, R> ListLexicons<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListLexiconsOutput,
smithy_http::result::SdkError<crate::error::ListLexiconsError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListLexiconsInputOperationOutputAlias,
crate::output::ListLexiconsOutput,
crate::error::ListLexiconsError,
crate::input::ListLexiconsInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
        /// <p>An opaque pagination token returned from a previous
        /// <code>ListLexicons</code> operation. If present, it indicates where
        /// to continue the listing of lexicons.</p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct ListSpeechSynthesisTasks<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::list_speech_synthesis_tasks_input::Builder,
}
impl<C, M, R> ListSpeechSynthesisTasks<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::ListSpeechSynthesisTasksOutput,
smithy_http::result::SdkError<crate::error::ListSpeechSynthesisTasksError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::ListSpeechSynthesisTasksInputOperationOutputAlias,
crate::output::ListSpeechSynthesisTasksOutput,
crate::error::ListSpeechSynthesisTasksError,
crate::input::ListSpeechSynthesisTasksInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Maximum number of speech synthesis tasks returned in a List
/// operation.</p>
pub fn max_results(mut self, inp: i32) -> Self {
self.inner = self.inner.max_results(inp);
self
}
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
/// <p>The pagination token to use in the next request to continue the
/// listing of speech synthesis tasks. </p>
pub fn next_token(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(inp);
self
}
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
        /// <p>Status of the speech synthesis tasks returned in a List
        /// operation.</p>
pub fn status(mut self, inp: crate::model::TaskStatus) -> Self {
self.inner = self.inner.status(inp);
self
}
pub fn set_status(mut self, input: std::option::Option<crate::model::TaskStatus>) -> Self {
self.inner = self.inner.set_status(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct PutLexicon<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::put_lexicon_input::Builder,
}
impl<C, M, R> PutLexicon<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::PutLexiconOutput,
smithy_http::result::SdkError<crate::error::PutLexiconError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::PutLexiconInputOperationOutputAlias,
crate::output::PutLexiconOutput,
crate::error::PutLexiconError,
crate::input::PutLexiconInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
        /// <p>Name of the lexicon. The name must follow the regular expression
        /// format [0-9A-Za-z]{1,20}. That is, the name is a case-sensitive
        /// alphanumeric string up to 20 characters long. </p>
pub fn name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.name(inp);
self
}
pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_name(input);
self
}
/// <p>Content of the PLS lexicon as string data.</p>
pub fn content(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.content(inp);
self
}
pub fn set_content(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_content(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct StartSpeechSynthesisTask<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::start_speech_synthesis_task_input::Builder,
}
impl<C, M, R> StartSpeechSynthesisTask<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::StartSpeechSynthesisTaskOutput,
smithy_http::result::SdkError<crate::error::StartSpeechSynthesisTaskError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::StartSpeechSynthesisTaskInputOperationOutputAlias,
crate::output::StartSpeechSynthesisTaskOutput,
crate::error::StartSpeechSynthesisTaskError,
crate::input::StartSpeechSynthesisTaskInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Specifies the engine (<code>standard</code> or <code>neural</code>)
/// for Amazon Polly to use when processing input text for speech synthesis. Using a
/// voice that is not supported for the engine selected will result in an
/// error.</p>
pub fn engine(mut self, inp: crate::model::Engine) -> Self {
self.inner = self.inner.engine(inp);
self
}
pub fn set_engine(mut self, input: std::option::Option<crate::model::Engine>) -> Self {
self.inner = self.inner.set_engine(input);
self
}
/// <p>Optional language code for the Speech Synthesis request. This is only
/// necessary if using a bilingual voice, such as Aditi, which can be used for
/// either Indian English (en-IN) or Hindi (hi-IN). </p>
/// <p>If a bilingual voice is used and no language code is specified, Amazon Polly
/// uses the default language of the bilingual voice. The default language for
/// any voice is the one returned by the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation for the <code>LanguageCode</code>
/// parameter. For example, if no language code is specified, Aditi will use
/// Indian English rather than Hindi.</p>
pub fn language_code(mut self, inp: crate::model::LanguageCode) -> Self {
self.inner = self.inner.language_code(inp);
self
}
pub fn set_language_code(
mut self,
input: std::option::Option<crate::model::LanguageCode>,
) -> Self {
self.inner = self.inner.set_language_code(input);
self
}
/// Appends an item to `LexiconNames`.
///
/// To override the contents of this collection use [`set_lexicon_names`](Self::set_lexicon_names).
/// <p>List of one or more pronunciation lexicon names you want the service
/// to apply during synthesis. Lexicons are applied only if the language of
/// the lexicon is the same as the language of the voice. </p>
pub fn lexicon_names(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.lexicon_names(inp);
self
}
pub fn set_lexicon_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_lexicon_names(input);
self
}
        /// <p>The format in which the returned output will be encoded. For an audio
        /// stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will
        /// be json. </p>
pub fn output_format(mut self, inp: crate::model::OutputFormat) -> Self {
self.inner = self.inner.output_format(inp);
self
}
pub fn set_output_format(
mut self,
input: std::option::Option<crate::model::OutputFormat>,
) -> Self {
self.inner = self.inner.set_output_format(input);
self
}
/// <p>Amazon S3 bucket name to which the output file will be saved.</p>
pub fn output_s3_bucket_name(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.output_s3_bucket_name(inp);
self
}
pub fn set_output_s3_bucket_name(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_output_s3_bucket_name(input);
self
}
/// <p>The Amazon S3 key prefix for the output speech file.</p>
pub fn output_s3_key_prefix(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.output_s3_key_prefix(inp);
self
}
pub fn set_output_s3_key_prefix(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_output_s3_key_prefix(input);
self
}
/// <p>The audio frequency specified in Hz.</p>
/// <p>The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050",
/// and "24000". The default value for standard voices is "22050". The default
/// value for neural voices is "24000".</p>
        /// <p>Valid values for pcm are "8000" and "16000". The default value is
        /// "16000". </p>
pub fn sample_rate(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.sample_rate(inp);
self
}
pub fn set_sample_rate(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_sample_rate(input);
self
}
/// <p>ARN for the SNS topic optionally used for providing status
/// notification for a speech synthesis task.</p>
pub fn sns_topic_arn(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.sns_topic_arn(inp);
self
}
pub fn set_sns_topic_arn(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_sns_topic_arn(input);
self
}
/// Appends an item to `SpeechMarkTypes`.
///
/// To override the contents of this collection use [`set_speech_mark_types`](Self::set_speech_mark_types).
/// <p>The type of speech marks returned for the input text.</p>
pub fn speech_mark_types(mut self, inp: impl Into<crate::model::SpeechMarkType>) -> Self {
self.inner = self.inner.speech_mark_types(inp);
self
}
pub fn set_speech_mark_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SpeechMarkType>>,
) -> Self {
self.inner = self.inner.set_speech_mark_types(input);
self
}
/// <p>The input text to synthesize. If you specify ssml as the TextType,
/// follow the SSML format for the input text. </p>
pub fn text(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.text(inp);
self
}
pub fn set_text(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_text(input);
self
}
/// <p>Specifies whether the input text is plain text or SSML. The default
/// value is plain text. </p>
pub fn text_type(mut self, inp: crate::model::TextType) -> Self {
self.inner = self.inner.text_type(inp);
self
}
pub fn set_text_type(mut self, input: std::option::Option<crate::model::TextType>) -> Self {
self.inner = self.inner.set_text_type(input);
self
}
/// <p>Voice ID to use for the synthesis. </p>
pub fn voice_id(mut self, inp: crate::model::VoiceId) -> Self {
self.inner = self.inner.voice_id(inp);
self
}
pub fn set_voice_id(mut self, input: std::option::Option<crate::model::VoiceId>) -> Self {
self.inner = self.inner.set_voice_id(input);
self
}
}
#[derive(std::fmt::Debug)]
pub struct SynthesizeSpeech<
C = smithy_client::erase::DynConnector,
M = aws_hyper::AwsMiddleware,
R = smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::synthesize_speech_input::Builder,
}
impl<C, M, R> SynthesizeSpeech<C, M, R>
where
C: smithy_client::bounds::SmithyConnector,
M: smithy_client::bounds::SmithyMiddleware<C>,
R: smithy_client::retry::NewRequestPolicy,
{
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
pub async fn send(
self,
) -> std::result::Result<
crate::output::SynthesizeSpeechOutput,
smithy_http::result::SdkError<crate::error::SynthesizeSpeechError>,
>
where
R::Policy: smithy_client::bounds::SmithyRetryPolicy<
crate::input::SynthesizeSpeechInputOperationOutputAlias,
crate::output::SynthesizeSpeechOutput,
crate::error::SynthesizeSpeechError,
crate::input::SynthesizeSpeechInputOperationRetryAlias,
>,
{
let input = self
.inner
.build()
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
let op = input
.make_operation(&self.handle.conf)
.map_err(|err| smithy_http::result::SdkError::ConstructionFailure(err.into()))?;
self.handle.client.call(op).await
}
/// <p>Specifies the engine (<code>standard</code> or <code>neural</code>)
/// for Amazon Polly to use when processing input text for speech synthesis. For
/// information on Amazon Polly voices and which voices are available in
/// standard-only, NTTS-only, and both standard and NTTS formats, see <a href="https://docs.aws.amazon.com/polly/latest/dg/voicelist.html">Available Voices</a>.</p>
/// <p>
/// <b>NTTS-only voices</b>
/// </p>
/// <p>When using NTTS-only voices such as Kevin (en-US), this parameter is
/// required and must be set to <code>neural</code>. If the engine is not
/// specified, or is set to <code>standard</code>, this will result in an
/// error. </p>
/// <p>Type: String</p>
/// <p>Valid Values: <code>standard</code> | <code>neural</code>
/// </p>
/// <p>Required: Yes</p>
/// <p>
/// <b>Standard voices</b>
/// </p>
/// <p>For standard voices, this is not required; the engine parameter
/// defaults to <code>standard</code>. If the engine is not specified, or is
/// set to <code>standard</code> and an NTTS-only voice is selected, this will
/// result in an error. </p>
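        ///
        /// For instance, a minimal sketch that selects the neural engine for an
        /// NTTS-only voice (assuming the crate is imported as `polly`; `Kevin`
        /// is the NTTS-only voice mentioned above):
        ///
        /// ```no_run
        /// # async fn neural(client: polly::Client) -> Result<(), Box<dyn std::error::Error>> {
        /// let output = client
        ///     .synthesize_speech()
        ///     .engine(polly::model::Engine::Neural)
        ///     .voice_id(polly::model::VoiceId::Kevin)
        ///     .output_format(polly::model::OutputFormat::Mp3)
        ///     .text("Hello from a neural voice")
        ///     .send()
        ///     .await?;
        /// # Ok(())
        /// # }
        /// ```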
pub fn engine(mut self, inp: crate::model::Engine) -> Self {
self.inner = self.inner.engine(inp);
self
}
pub fn set_engine(mut self, input: std::option::Option<crate::model::Engine>) -> Self {
self.inner = self.inner.set_engine(input);
self
}
/// <p>Optional language code for the Synthesize Speech request. This is only
/// necessary if using a bilingual voice, such as Aditi, which can be used for
/// either Indian English (en-IN) or Hindi (hi-IN). </p>
/// <p>If a bilingual voice is used and no language code is specified, Amazon Polly
/// uses the default language of the bilingual voice. The default language for
/// any voice is the one returned by the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation for the <code>LanguageCode</code>
/// parameter. For example, if no language code is specified, Aditi will use
/// Indian English rather than Hindi.</p>
pub fn language_code(mut self, inp: crate::model::LanguageCode) -> Self {
self.inner = self.inner.language_code(inp);
self
}
pub fn set_language_code(
mut self,
input: std::option::Option<crate::model::LanguageCode>,
) -> Self {
self.inner = self.inner.set_language_code(input);
self
}
/// Appends an item to `LexiconNames`.
///
/// To override the contents of this collection use [`set_lexicon_names`](Self::set_lexicon_names).
/// <p>List of one or more pronunciation lexicon names you want the
/// service to apply during synthesis. Lexicons are applied only if the
/// language of the lexicon is the same as the language of the voice. For
/// information about storing lexicons, see <a href="https://docs.aws.amazon.com/polly/latest/dg/API_PutLexicon.html">PutLexicon</a>.</p>
pub fn lexicon_names(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.lexicon_names(inp);
self
}
pub fn set_lexicon_names(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.inner = self.inner.set_lexicon_names(input);
self
}
        /// <p> The format in which the returned output will be encoded. For an audio
        /// stream, this will be mp3, ogg_vorbis, or pcm. For speech marks, this will
        /// be json. </p>
/// <p>When pcm is used, the content returned is audio/pcm in a signed
/// 16-bit, 1 channel (mono), little-endian format. </p>
pub fn output_format(mut self, inp: crate::model::OutputFormat) -> Self {
self.inner = self.inner.output_format(inp);
self
}
pub fn set_output_format(
mut self,
input: std::option::Option<crate::model::OutputFormat>,
) -> Self {
self.inner = self.inner.set_output_format(input);
self
}
/// <p>The audio frequency specified in Hz.</p>
/// <p>The valid values for mp3 and ogg_vorbis are "8000", "16000", "22050",
/// and "24000". The default value for standard voices is "22050". The default
/// value for neural voices is "24000".</p>
        /// <p>Valid values for pcm are "8000" and "16000". The default value is
        /// "16000". </p>
pub fn sample_rate(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.sample_rate(inp);
self
}
pub fn set_sample_rate(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_sample_rate(input);
self
}
/// Appends an item to `SpeechMarkTypes`.
///
/// To override the contents of this collection use [`set_speech_mark_types`](Self::set_speech_mark_types).
/// <p>The type of speech marks returned for the input text.</p>
pub fn speech_mark_types(mut self, inp: impl Into<crate::model::SpeechMarkType>) -> Self {
self.inner = self.inner.speech_mark_types(inp);
self
}
pub fn set_speech_mark_types(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::SpeechMarkType>>,
) -> Self {
self.inner = self.inner.set_speech_mark_types(input);
self
}
/// <p> Input text to synthesize. If you specify <code>ssml</code> as the
/// <code>TextType</code>, follow the SSML format for the input text.
/// </p>
pub fn text(mut self, inp: impl Into<std::string::String>) -> Self {
self.inner = self.inner.text(inp);
self
}
pub fn | (mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_text(input);
self
}
/// <p> Specifies whether the input text is plain text or SSML. The
/// default value is plain text. For more information, see <a href="https://docs.aws.amazon.com/polly/latest/dg/ssml.html">Using
/// SSML</a>.</p>
pub fn text_type(mut self, inp: crate::model::TextType) -> Self {
self.inner = self.inner.text_type(inp);
self
}
pub fn set_text_type(mut self, input: std::option::Option<crate::model::TextType>) -> Self {
self.inner = self.inner.set_text_type(input);
self
}
/// <p> Voice ID to use for the synthesis. You can get a list of available
/// voice IDs by calling the <a href="https://docs.aws.amazon.com/polly/latest/dg/API_DescribeVoices.html">DescribeVoices</a> operation. </p>
pub fn voice_id(mut self, inp: crate::model::VoiceId) -> Self {
self.inner = self.inner.voice_id(inp);
self
}
pub fn set_voice_id(mut self, input: std::option::Option<crate::model::VoiceId>) -> Self {
self.inner = self.inner.set_voice_id(input);
self
}
}
}
impl<C> Client<C, aws_hyper::AwsMiddleware, smithy_client::retry::Standard> {
pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
let client = aws_hyper::Client::new(conn);
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
impl
Client<
smithy_client::erase::DynConnector,
aws_hyper::AwsMiddleware,
smithy_client::retry::Standard,
>
{
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn new(config: &aws_types::config::Config) -> Self {
Self::from_conf(config.into())
}
#[cfg(any(feature = "rustls", feature = "native-tls"))]
pub fn from_conf(conf: crate::Config) -> Self {
let client = aws_hyper::Client::https();
Self {
handle: std::sync::Arc::new(Handle { client, conf }),
}
}
}
| set_text |
term.go | // Generated automatically. DO NOT HAND-EDIT.
package aterm
import "github.com/gdamore/tcell/v2/terminfo"
func | () {
// AfterStep terminal
terminfo.AddTerminfo(&terminfo.Terminfo{
Name: "aterm",
Columns: 80,
Lines: 24,
Colors: 8,
Bell: "\a",
Clear: "\x1b[H\x1b[2J",
EnterCA: "\x1b7\x1b[?47h",
ExitCA: "\x1b[2J\x1b[?47l\x1b8",
ShowCursor: "\x1b[?25h",
HideCursor: "\x1b[?25l",
AttrOff: "\x1b[m\x0f",
Underline: "\x1b[4m",
Bold: "\x1b[1m",
Blink: "\x1b[5m",
Reverse: "\x1b[7m",
EnterKeypad: "\x1b=",
ExitKeypad: "\x1b>",
SetFg: "\x1b[3%p1%dm",
SetBg: "\x1b[4%p1%dm",
SetFgBg: "\x1b[3%p1%d;4%p2%dm",
PadChar: "\x00",
AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~",
EnterAcs: "\x0e",
ExitAcs: "\x0f",
EnableAcs: "\x1b(B\x1b)0",
Mouse: "\x1b[M",
MouseMode: "%?%p1%{1}%=%t%'h'%Pa%e%'l'%Pa%;\x1b[?1000%ga%c\x1b[?1002%ga%c\x1b[?1003%ga%c\x1b[?1006%ga%c",
SetCursor: "\x1b[%i%p1%d;%p2%dH",
CursorBack1: "\b",
CursorUp1: "\x1b[A",
KeyUp: "\x1b[A",
KeyDown: "\x1b[B",
KeyRight: "\x1b[C",
KeyLeft: "\x1b[D",
KeyInsert: "\x1b[2~",
KeyDelete: "\x1b[3~",
KeyBackspace: "\u007f",
KeyHome: "\x1b[7~",
KeyEnd: "\x1b[8~",
KeyPgUp: "\x1b[5~",
KeyPgDn: "\x1b[6~",
KeyF1: "\x1bOP",
KeyF2: "\x1bOQ",
KeyF3: "\x1bOR",
KeyF4: "\x1bOS",
KeyF5: "\x1b[15~",
KeyF6: "\x1b[17~",
KeyF7: "\x1b[18~",
KeyF8: "\x1b[19~",
KeyF9: "\x1b[20~",
KeyF10: "\x1b[21~",
KeyF11: "\x1b[23~",
KeyF12: "\x1b[24~",
KeyF13: "\x1b[25~",
KeyF14: "\x1b[26~",
KeyF15: "\x1b[28~",
KeyF16: "\x1b[29~",
KeyF17: "\x1b[31~",
KeyF18: "\x1b[32~",
KeyF19: "\x1b[33~",
KeyF20: "\x1b[34~",
KeyF21: "\x1b[23$",
KeyF22: "\x1b[24$",
KeyF23: "\x1b[11^",
KeyF24: "\x1b[12^",
KeyF25: "\x1b[13^",
KeyF26: "\x1b[14^",
KeyF27: "\x1b[15^",
KeyF28: "\x1b[17^",
KeyF29: "\x1b[18^",
KeyF30: "\x1b[19^",
KeyF31: "\x1b[20^",
KeyF32: "\x1b[21^",
KeyF33: "\x1b[23^",
KeyF34: "\x1b[24^",
KeyF35: "\x1b[25^",
KeyF36: "\x1b[26^",
KeyF37: "\x1b[28^",
KeyF38: "\x1b[29^",
KeyF39: "\x1b[31^",
KeyF40: "\x1b[32^",
KeyF41: "\x1b[33^",
KeyF42: "\x1b[34^",
KeyF43: "\x1b[23@",
KeyF44: "\x1b[24@",
KeyBacktab: "\x1b[Z",
KeyShfLeft: "\x1b[d",
KeyShfRight: "\x1b[c",
KeyShfUp: "\x1b[a",
KeyShfDown: "\x1b[b",
KeyCtrlLeft: "\x1b[Od",
KeyCtrlRight: "\x1b[Oc",
KeyCtrlUp: "\x1b[Oa",
KeyCtrlDown: "\x1b[Ob",
KeyShfHome: "\x1b[7$",
KeyShfEnd: "\x1b[8$",
KeyCtrlHome: "\x1b[7^",
KeyCtrlEnd: "\x1b[8^",
})
}
| init |
main.rs | #![forbid(unsafe_code)]
#![allow(irrefutable_let_patterns)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::large_enum_variant)]
// These are useless.
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::inconsistent_struct_constructor)]
extern crate rg3d;
#[macro_use]
extern crate lazy_static;
extern crate directories;
pub mod asset;
pub mod camera;
pub mod command;
pub mod configurator;
pub mod gui;
pub mod interaction;
pub mod light;
pub mod log;
pub mod material;
pub mod menu;
pub mod overlay;
pub mod physics;
pub mod preview;
pub mod project_dirs;
pub mod scene;
pub mod settings;
pub mod sidebar;
pub mod sound;
pub mod utils;
pub mod world_outliner;
use crate::{
asset::{AssetBrowser, AssetKind},
camera::CameraController,
command::{CommandStack, CommandStackViewer},
configurator::Configurator,
gui::{
make_dropdown_list_option, BuildContext, EditorUiMessage, EditorUiNode, Ui, UiMessage,
UiNode,
},
interaction::{
move_mode::MoveInteractionMode,
navmesh::{
data_model::{Navmesh, NavmeshTriangle, NavmeshVertex},
EditNavmeshMode, NavmeshPanel,
},
rotate_mode::RotateInteractionMode,
scale_mode::ScaleInteractionMode,
select_mode::SelectInteractionMode,
terrain::TerrainInteractionMode,
InteractionMode, InteractionModeKind, InteractionModeTrait,
},
light::LightPanel,
log::Log,
material::MaterialEditor,
menu::{Menu, MenuContext},
overlay::OverlayRenderPass,
physics::Physics,
scene::{
commands::{
graph::LoadModelCommand, make_delete_selection_command, mesh::SetMeshTextureCommand,
particle_system::SetParticleSystemTextureCommand, sound::DeleteSoundSourceCommand,
sprite::SetSpriteTextureCommand, ChangeSelectionCommand, CommandGroup, PasteCommand,
SceneCommand, SceneContext,
},
EditorScene, Selection,
},
settings::{Settings, SettingsSectionKind},
sidebar::SideBar,
sound::SoundPanel,
utils::path_fixer::PathFixer,
world_outliner::WorldOutliner,
};
use rg3d::{
core::{
algebra::{Point3, Vector2},
color::Color,
math::aabb::AxisAlignedBoundingBox,
pool::{Handle, Pool},
scope_profile,
},
dpi::LogicalSize,
engine::resource_manager::{MaterialSearchOptions, TextureImportOptions},
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoop},
gui::{
border::BorderBuilder,
brush::Brush,
button::ButtonBuilder,
canvas::CanvasBuilder,
dock::{DockingManagerBuilder, TileBuilder, TileContent},
draw,
dropdown_list::DropdownListBuilder,
file_browser::{FileBrowserMode, FileSelectorBuilder, Filter},
grid::{Column, GridBuilder, Row},
image::ImageBuilder,
message::{
ButtonMessage, FileSelectorMessage, ImageMessage, KeyCode, MessageBoxMessage,
MessageDirection, MouseButton, UiMessageData, WidgetMessage, WindowMessage,
},
message::{DropdownListMessage, TextBoxMessage},
messagebox::{MessageBoxBuilder, MessageBoxButtons, MessageBoxResult},
stack_panel::StackPanelBuilder,
text::TextBuilder,
text_box::TextBoxBuilder,
ttf::Font,
widget::WidgetBuilder,
window::{WindowBuilder, WindowTitle},
HorizontalAlignment, Orientation, Thickness, VerticalAlignment,
},
material::{Material, PropertyValue},
resource::texture::{CompressionOptions, Texture, TextureKind, TextureState},
scene::{
base::BaseBuilder,
debug::{Line, SceneDrawingContext},
graph::Graph,
mesh::{
buffer::{VertexAttributeUsage, VertexReadTrait},
Mesh,
},
node::Node,
Scene,
},
utils::{into_gui_texture, log::MessageKind, translate_cursor_icon, translate_event},
};
use std::{
fs,
io::Write,
path::{Path, PathBuf},
str::from_utf8,
sync::{
mpsc::{self, Receiver, Sender},
Arc, Mutex,
},
time::Instant,
};
pub const MSG_SYNC_FLAG: u64 = 1;
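/// Sends `msg` with `MSG_SYNC_FLAG` set. The editor uses this flag to tell
/// messages produced while syncing the UI to the data model apart from real
/// user interaction, so message handlers can skip them and avoid feedback loops.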
pub fn send_sync_message(ui: &Ui, mut msg: UiMessage) {
msg.flags = MSG_SYNC_FLAG;
ui.send_message(msg);
}
type GameEngine = rg3d::engine::Engine<EditorUiMessage, EditorUiNode>;
lazy_static! {
    // This checks the debug flag in release.toml and, at the
    // same time, whether the program is installed.
static ref DEBUG_HANDLE: bool = {
let release_toml = project_dirs::resources_dir("release.toml");
if release_toml.exists() {
let file = fs::read(release_toml).unwrap();
from_utf8(&file)
.unwrap()
.parse::<toml::Value>()
.unwrap()["debug-mode"]
.as_bool()
.unwrap()
} else {
true
}
};
    // This constant exposes the DEBUG_HANDLE value to the config_dir and
    // data_dir functions and makes sure the config and data directories exist.
static ref TEST_EXISTENCE: bool = {
if !(*DEBUG_HANDLE) {
            // We check whether the config and data directories exist.
if !project_dirs::data_dir("").exists() {
                // If they don't exist, we create them.
fs::create_dir(project_dirs::config_dir("")).unwrap();
fs::create_dir(project_dirs::data_dir("")).unwrap();
}
true
} else {
false
}
};
static ref CONFIG_DIR: Mutex<PathBuf> = Mutex::new(project_dirs::working_config_dir(""));
static ref DATA_DIR: Mutex<PathBuf> = Mutex::new(project_dirs::working_data_dir(""));
}
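/// Decodes an embedded image (e.g. one of the toolbar icons included via
/// `include_bytes!`) into a GUI texture; returns `None` if decoding fails.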
pub fn load_image(data: &[u8]) -> Option<draw::SharedTexture> {
Some(into_gui_texture(
Texture::load_from_memory(data, CompressionOptions::NoCompression).ok()?,
))
}
pub fn | (color: Color) -> Arc<Mutex<Material>> {
let mut material = Material::standard();
material
.set_property("diffuseColor", PropertyValue::Color(color))
.unwrap();
Arc::new(Mutex::new(material))
}
pub fn set_mesh_diffuse_color(mesh: &mut Mesh, color: Color) {
for surface in mesh.surfaces() {
surface
.material()
.lock()
.unwrap()
.set_property("diffuseColor", PropertyValue::Color(color))
.unwrap();
}
}
pub fn create_terrain_layer_material() -> Arc<Mutex<Material>> {
let mut material = Material::standard_terrain();
material
.set_property(
"texCoordScale",
PropertyValue::Vector2(Vector2::new(10.0, 10.0)),
)
.unwrap();
Arc::new(Mutex::new(material))
}
pub struct ScenePreview {
frame: Handle<UiNode>,
window: Handle<UiNode>,
last_mouse_pos: Option<Vector2<f32>>,
click_mouse_pos: Option<Vector2<f32>>,
selection_frame: Handle<UiNode>,
// Side bar stuff
select_mode: Handle<UiNode>,
move_mode: Handle<UiNode>,
rotate_mode: Handle<UiNode>,
scale_mode: Handle<UiNode>,
navmesh_mode: Handle<UiNode>,
terrain_mode: Handle<UiNode>,
sender: Sender<Message>,
}
pub fn make_relative_path<P: AsRef<Path>>(path: P) -> PathBuf {
// Strip working directory from file name.
let relative_path = path
.as_ref()
.canonicalize()
.unwrap()
.strip_prefix(std::env::current_dir().unwrap().canonicalize().unwrap())
.unwrap()
.to_owned();
rg3d::core::replace_slashes(relative_path)
}
pub struct ModelImportDialog {
// View
pub window: Handle<UiNode>,
options: Handle<UiNode>,
path_field: Handle<UiNode>,
path_selector: Handle<UiNode>,
select_path: Handle<UiNode>,
ok: Handle<UiNode>,
cancel: Handle<UiNode>,
path_selection_section: Handle<UiNode>,
// Data model
model_path: PathBuf,
material_search_options: MaterialSearchOptions,
}
impl ModelImportDialog {
pub fn new(ctx: &mut BuildContext) -> Self {
let options;
let select_path;
let path_field;
let ok;
let cancel;
let path_selection_section;
let window = WindowBuilder::new(WidgetBuilder::new().with_width(400.0).with_height(135.0))
.open(false)
.with_title(WindowTitle::text("Import Model"))
.with_content(
GridBuilder::new(
WidgetBuilder::new()
.with_child(
TextBuilder::new(
WidgetBuilder::new()
.on_row(0)
.with_margin(Thickness::uniform(1.0)),
)
.with_text("Please select the material search options.")
.build(ctx),
)
.with_child(
GridBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0))
.on_row(1)
.with_child(
TextBuilder::new(WidgetBuilder::new().on_column(0))
.with_text("Options")
.with_vertical_text_alignment(VerticalAlignment::Center)
.build(ctx),
)
.with_child({
options = DropdownListBuilder::new(
WidgetBuilder::new().on_column(1),
)
.with_items(vec![
make_dropdown_list_option(ctx, "Recursive Up"),
make_dropdown_list_option(ctx, "Materials Directory"),
make_dropdown_list_option(ctx, "Working Directory"),
])
.with_selected(0)
.with_close_on_selection(true)
.build(ctx);
options
}),
)
.add_column(Column::strict(100.0))
.add_column(Column::stretch())
.add_row(Row::strict(26.0))
.build(ctx),
)
.with_child({
path_selection_section = GridBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0))
.on_row(2)
.with_visibility(false)
.with_child({
path_field = TextBoxBuilder::new(
WidgetBuilder::new().with_enabled(false).on_column(0),
)
.with_vertical_text_alignment(VerticalAlignment::Center)
.build(ctx);
path_field
})
.with_child({
select_path =
ButtonBuilder::new(WidgetBuilder::new().on_column(1))
.with_text("...")
.build(ctx);
select_path
}),
)
.add_column(Column::stretch())
.add_column(Column::strict(26.0))
.add_row(Row::strict(26.0))
.build(ctx);
path_selection_section
})
.with_child(
StackPanelBuilder::new(
WidgetBuilder::new()
.with_horizontal_alignment(HorizontalAlignment::Right)
.on_row(4)
.with_child({
ok = ButtonBuilder::new(
WidgetBuilder::new().with_width(100.0),
)
.with_text("OK")
.build(ctx);
ok
})
.with_child({
cancel = ButtonBuilder::new(
WidgetBuilder::new().with_width(100.0),
)
.with_text("Cancel")
.build(ctx);
cancel
}),
)
.with_orientation(Orientation::Horizontal)
.build(ctx),
),
)
.add_row(Row::auto())
.add_row(Row::auto())
.add_row(Row::auto())
.add_row(Row::stretch())
.add_row(Row::strict(26.0))
.add_column(Column::stretch())
.build(ctx),
)
.build(ctx);
let path_selector = FileSelectorBuilder::new(
WindowBuilder::new(WidgetBuilder::new().with_width(300.0).with_height(500.0))
.open(false),
)
.with_filter(Filter::new(|p: &Path| p.is_dir()))
.with_path(".")
.build(ctx);
Self {
window,
options,
ok,
cancel,
select_path,
path_selector,
path_field,
model_path: Default::default(),
path_selection_section,
material_search_options: MaterialSearchOptions::RecursiveUp,
}
}
pub fn set_working_directory(&mut self, engine: &mut GameEngine, dir: &Path) {
assert!(dir.is_dir());
engine
.user_interface
.send_message(FileSelectorMessage::root(
self.path_selector,
MessageDirection::ToWidget,
Some(dir.to_owned()),
));
}
pub fn open(&mut self, model_path: PathBuf, ui: &Ui) {
self.model_path = model_path;
ui.send_message(WindowMessage::open_modal(
self.window,
MessageDirection::ToWidget,
true,
));
}
pub fn handle_ui_message(&mut self, message: &UiMessage, ui: &Ui, sender: &Sender<Message>) {
match message.data() {
UiMessageData::Button(ButtonMessage::Click) => {
if message.destination() == self.ok {
ui.send_message(WindowMessage::close(
self.window,
MessageDirection::ToWidget,
));
sender
.send(Message::DoSceneCommand(SceneCommand::LoadModel(
LoadModelCommand::new(
self.model_path.clone(),
self.material_search_options.clone(),
),
)))
.unwrap();
} else if message.destination() == self.cancel {
ui.send_message(WindowMessage::close(
self.window,
MessageDirection::ToWidget,
));
} else if message.destination() == self.select_path {
ui.send_message(WindowMessage::open_modal(
self.path_selector,
MessageDirection::ToWidget,
true,
));
}
}
UiMessageData::DropdownList(DropdownListMessage::SelectionChanged(Some(value)))
if message.destination() == self.options =>
{
let show_path_selection_options = match *value {
0 => {
self.material_search_options = MaterialSearchOptions::RecursiveUp;
false
}
1 => {
self.material_search_options =
MaterialSearchOptions::MaterialsDirectory(PathBuf::from("."));
true
}
2 => {
self.material_search_options = MaterialSearchOptions::WorkingDirectory;
false
}
_ => unreachable!(),
};
ui.send_message(WidgetMessage::visibility(
self.path_selection_section,
MessageDirection::ToWidget,
show_path_selection_options,
));
}
UiMessageData::FileSelector(FileSelectorMessage::Commit(path))
if message.destination() == self.path_selector =>
{
ui.send_message(TextBoxMessage::text(
self.path_field,
MessageDirection::ToWidget,
path.to_string_lossy().to_string(),
));
self.material_search_options =
MaterialSearchOptions::MaterialsDirectory(path.clone());
}
_ => (),
}
}
}
impl ScenePreview {
pub fn new(engine: &mut GameEngine, sender: Sender<Message>) -> Self {
let ctx = &mut engine.user_interface.build_ctx();
let frame;
let select_mode;
let move_mode;
let rotate_mode;
let scale_mode;
let navmesh_mode;
let terrain_mode;
let selection_frame;
let window = WindowBuilder::new(WidgetBuilder::new())
.can_close(false)
.can_minimize(false)
.with_content(
GridBuilder::new(
WidgetBuilder::new()
.with_child({
frame = ImageBuilder::new(
WidgetBuilder::new()
.on_row(0)
.on_column(1)
.with_allow_drop(true),
)
.with_flip(true)
.build(ctx);
frame
})
.with_child(
CanvasBuilder::new(WidgetBuilder::new().on_column(1).with_child({
selection_frame = BorderBuilder::new(
WidgetBuilder::new()
.with_visibility(false)
.with_background(Brush::Solid(Color::from_rgba(
255, 255, 255, 40,
)))
.with_foreground(Brush::Solid(Color::opaque(0, 255, 0))),
)
.with_stroke_thickness(Thickness::uniform(1.0))
.build(ctx);
selection_frame
}))
.build(ctx),
)
.with_child(
StackPanelBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0))
.on_row(0)
.on_column(0)
.with_child({
select_mode = ButtonBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0)),
)
.with_content(
ImageBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0))
.with_width(32.0)
.with_height(32.0),
)
.with_opt_texture(load_image(include_bytes!(
"../resources/embed/select.png"
)))
.build(ctx),
)
.build(ctx);
select_mode
})
.with_child({
move_mode = ButtonBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0)),
)
.with_content(
ImageBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0))
.with_width(32.0)
.with_height(32.0),
)
.with_opt_texture(load_image(include_bytes!(
"../resources/embed/move_arrow.png"
)))
.build(ctx),
)
.build(ctx);
move_mode
})
.with_child({
rotate_mode = ButtonBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0)),
)
.with_content(
ImageBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0))
.with_width(32.0)
.with_height(32.0),
)
.with_opt_texture(load_image(include_bytes!(
"../resources/embed/rotate_arrow.png"
)))
.build(ctx),
)
.build(ctx);
rotate_mode
})
.with_child({
scale_mode = ButtonBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0)),
)
.with_content(
ImageBuilder::new(
WidgetBuilder::new()
.with_width(32.0)
.with_height(32.0),
)
.with_opt_texture(load_image(include_bytes!(
"../resources/embed/scale_arrow.png"
)))
.build(ctx),
)
.build(ctx);
scale_mode
})
.with_child({
navmesh_mode = ButtonBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0)),
)
.with_content(
ImageBuilder::new(
WidgetBuilder::new()
.with_width(32.0)
.with_height(32.0),
)
.with_opt_texture(load_image(include_bytes!(
"../resources/embed/navmesh.png"
)))
.build(ctx),
)
.build(ctx);
navmesh_mode
})
.with_child({
terrain_mode = ButtonBuilder::new(
WidgetBuilder::new()
.with_margin(Thickness::uniform(1.0)),
)
.with_content(
ImageBuilder::new(
WidgetBuilder::new()
.with_width(32.0)
.with_height(32.0),
)
.with_opt_texture(load_image(include_bytes!(
"../resources/embed/terrain.png"
)))
.build(ctx),
)
.build(ctx);
terrain_mode
}),
)
.build(ctx),
),
)
.add_row(Row::stretch())
.add_column(Column::auto())
.add_column(Column::stretch())
.build(ctx),
)
.with_title(WindowTitle::text("Scene Preview"))
.build(ctx);
Self {
sender,
window,
frame,
last_mouse_pos: None,
move_mode,
rotate_mode,
scale_mode,
selection_frame,
select_mode,
navmesh_mode,
terrain_mode,
click_mouse_pos: None,
}
}
}
impl ScenePreview {
fn handle_ui_message(&mut self, message: &UiMessage, ui: &Ui) {
scope_profile!();
match &message.data() {
UiMessageData::Button(ButtonMessage::Click) => {
if message.destination() == self.scale_mode {
self.sender
.send(Message::SetInteractionMode(InteractionModeKind::Scale))
.unwrap();
} else if message.destination() == self.rotate_mode {
self.sender
.send(Message::SetInteractionMode(InteractionModeKind::Rotate))
.unwrap();
} else if message.destination() == self.move_mode {
self.sender
.send(Message::SetInteractionMode(InteractionModeKind::Move))
.unwrap();
} else if message.destination() == self.select_mode {
self.sender
.send(Message::SetInteractionMode(InteractionModeKind::Select))
.unwrap();
} else if message.destination() == self.navmesh_mode {
self.sender
.send(Message::SetInteractionMode(InteractionModeKind::Navmesh))
.unwrap();
} else if message.destination() == self.terrain_mode {
self.sender
.send(Message::SetInteractionMode(InteractionModeKind::Terrain))
.unwrap();
}
}
UiMessageData::Widget(WidgetMessage::MouseDown { button, .. }) => {
if ui.is_node_child_of(message.destination(), self.move_mode)
&& *button == MouseButton::Right
{
self.sender
.send(Message::OpenSettings(SettingsSectionKind::MoveModeSettings))
.unwrap();
}
}
_ => {}
}
}
}
#[derive(Debug)]
pub enum Message {
DoSceneCommand(SceneCommand),
UndoSceneCommand,
RedoSceneCommand,
ClearSceneCommandStack,
SelectionChanged,
SyncToModel,
SaveScene(PathBuf),
LoadScene(PathBuf),
CloseScene,
SetInteractionMode(InteractionModeKind),
Log(String),
Configure { working_directory: PathBuf },
NewScene,
Exit { force: bool },
OpenSettings(SettingsSectionKind),
OpenMaterialEditor(Arc<Mutex<Material>>),
ShowInAssetBrowser(PathBuf),
}
pub fn make_scene_file_filter() -> Filter {
Filter::new(|p: &Path| {
if let Some(ext) = p.extension() {
ext.to_string_lossy().as_ref() == "rgs"
} else {
p.is_dir()
}
})
}
pub fn make_save_file_selector(ctx: &mut BuildContext) -> Handle<UiNode> {
FileSelectorBuilder::new(
WindowBuilder::new(WidgetBuilder::new().with_width(300.0).with_height(400.0))
.with_title(WindowTitle::Text("Save Scene As".into()))
.open(false),
)
.with_mode(FileBrowserMode::Save {
default_file_name: PathBuf::from("unnamed.rgs"),
})
.with_path("./")
.with_filter(make_scene_file_filter())
.build(ctx)
}
struct Editor {
sidebar: SideBar,
scene: Option<EditorScene>,
command_stack: CommandStack<SceneCommand>,
message_sender: Sender<Message>,
message_receiver: Receiver<Message>,
interaction_modes: Vec<InteractionMode>,
current_interaction_mode: Option<InteractionModeKind>,
world_outliner: WorldOutliner,
root_grid: Handle<UiNode>,
preview: ScenePreview,
asset_browser: AssetBrowser,
exit_message_box: Handle<UiNode>,
save_file_selector: Handle<UiNode>,
light_panel: LightPanel,
sound_panel: SoundPanel,
menu: Menu,
exit: bool,
configurator: Configurator,
log: Log,
command_stack_viewer: CommandStackViewer,
validation_message_box: Handle<UiNode>,
navmesh_panel: NavmeshPanel,
settings: Settings,
model_import_dialog: ModelImportDialog,
path_fixer: PathFixer,
material_editor: MaterialEditor,
}
impl Editor {
fn new(engine: &mut GameEngine) -> Self {
let (message_sender, message_receiver) = mpsc::channel();
*rg3d::gui::DEFAULT_FONT.0.lock().unwrap() = Font::from_memory(
include_bytes!("../resources/embed/arial.ttf").to_vec(),
14.0,
Font::default_char_set(),
)
.unwrap();
let configurator = Configurator::new(
message_sender.clone(),
&mut engine.user_interface.build_ctx(),
);
engine
.user_interface
.send_message(WindowMessage::open_modal(
configurator.window,
MessageDirection::ToWidget,
true,
));
let mut settings = Settings::default();
match Settings::load() {
Ok(s) => {
settings = s;
println!("Editor settings were loaded successfully!");
match engine
.renderer
.set_quality_settings(&settings.graphics.quality)
{
Ok(_) => {
println!("Graphics settings were applied successfully!");
}
Err(e) => {
println!("Failed to apply graphics settings! Reason: {:?}", e)
}
}
}
Err(e) => {
println!(
"Failed to load settings, fallback to default. Reason: {:?}",
e
)
}
}
let preview = ScenePreview::new(engine, message_sender.clone());
let asset_browser = AssetBrowser::new(engine);
let menu = Menu::new(engine, message_sender.clone(), &settings);
let light_panel = LightPanel::new(engine);
let ctx = &mut engine.user_interface.build_ctx();
let sidebar = SideBar::new(ctx, message_sender.clone());
let navmesh_panel = NavmeshPanel::new(ctx, message_sender.clone());
let world_outliner = WorldOutliner::new(ctx, message_sender.clone());
let command_stack_viewer = CommandStackViewer::new(ctx, message_sender.clone());
let sound_panel = SoundPanel::new(ctx);
let log = Log::new(ctx);
let model_import_dialog = ModelImportDialog::new(ctx);
let root_grid = GridBuilder::new(
WidgetBuilder::new()
.with_width(engine.renderer.get_frame_size().0 as f32)
.with_height(engine.renderer.get_frame_size().1 as f32)
.with_child(menu.menu)
.with_child(
DockingManagerBuilder::new(WidgetBuilder::new().on_row(1).with_child({
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::VerticalTiles {
splitter: 0.75,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::HorizontalTiles {
splitter: 0.25,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::VerticalTiles {
splitter: 0.6,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
world_outliner.window,
))
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
sound_panel.window,
))
.build(ctx),
],
})
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::HorizontalTiles {
splitter: 0.66,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
preview.window,
))
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
sidebar.window,
))
.build(ctx),
],
})
.build(ctx),
],
})
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::HorizontalTiles {
splitter: 0.66,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::HorizontalTiles {
splitter: 0.80,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
asset_browser.window,
))
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
command_stack_viewer.window,
))
.build(ctx),
],
})
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::HorizontalTiles {
splitter: 0.5,
tiles: [
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
log.window,
))
.build(ctx),
TileBuilder::new(WidgetBuilder::new())
.with_content(TileContent::Window(
navmesh_panel.window,
))
.build(ctx),
],
})
.build(ctx),
],
})
.build(ctx),
],
})
.build(ctx)
}))
.build(ctx),
),
)
.add_row(Row::strict(25.0))
.add_row(Row::stretch())
.add_column(Column::stretch())
.build(ctx);
let save_file_selector = make_save_file_selector(ctx);
let exit_message_box = MessageBoxBuilder::new(
WindowBuilder::new(WidgetBuilder::new().with_width(300.0).with_height(100.0))
.can_close(false)
.can_minimize(false)
.open(false)
.with_title(WindowTitle::Text("Unsaved changes".to_owned())),
)
.with_text("There are unsaved changes. Do you wish to save them before exit?")
.with_buttons(MessageBoxButtons::YesNoCancel)
.build(ctx);
let validation_message_box = MessageBoxBuilder::new(
WindowBuilder::new(WidgetBuilder::new().with_width(400.0).with_height(500.0))
.can_close(false)
.can_minimize(false)
.open(false)
.with_title(WindowTitle::Text("Validation failed!".to_owned())),
)
.with_buttons(MessageBoxButtons::Ok)
.build(ctx);
let path_fixer = PathFixer::new(ctx);
let test_material = Arc::new(Mutex::new(Material::standard()));
let mut material_editor = MaterialEditor::new(engine);
material_editor.set_material(Some(test_material), engine);
let mut editor = Self {
navmesh_panel,
sidebar,
sound_panel,
preview,
scene: None,
command_stack: CommandStack::new(false),
message_sender,
message_receiver,
interaction_modes: Default::default(),
current_interaction_mode: None,
world_outliner,
root_grid,
menu,
exit: false,
asset_browser,
exit_message_box,
save_file_selector,
configurator,
log,
light_panel,
command_stack_viewer,
validation_message_box,
settings,
model_import_dialog,
path_fixer,
material_editor,
};
editor.set_interaction_mode(Some(InteractionModeKind::Move), engine);
editor
}
fn set_scene(&mut self, engine: &mut GameEngine, mut scene: Scene, path: Option<PathBuf>) {
if let Some(previous_editor_scene) = self.scene.as_ref() {
engine.scenes.remove(previous_editor_scene.scene);
}
self.scene = None;
self.sync_to_model(engine);
poll_ui_messages(self, engine);
// Disable binder so we'll have full control over node's transform even if
// it has a physical body.
scene.physics_binder.enabled = false;
scene.render_target = Some(Texture::new_render_target(0, 0));
engine.user_interface.send_message(ImageMessage::texture(
self.preview.frame,
MessageDirection::ToWidget,
Some(into_gui_texture(scene.render_target.clone().unwrap())),
));
let root = BaseBuilder::new().build(&mut scene.graph);
let graph = &mut scene.graph;
let camera_controller = CameraController::new(graph, root);
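// Convert the scene's navmeshes into the editor's pool-based representation
// so their vertices and triangles can be edited independently of the scene data.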
let mut navmeshes = Pool::new();
for navmesh in scene.navmeshes.iter() {
let _ = navmeshes.spawn(Navmesh {
vertices: navmesh
.vertices()
.iter()
.map(|vertex| NavmeshVertex {
position: vertex.position,
})
.collect(),
triangles: navmesh
.triangles()
.iter()
.map(|triangle| NavmeshTriangle {
a: Handle::new(triangle[0], 1),
b: Handle::new(triangle[1], 1),
c: Handle::new(triangle[2], 1),
})
.collect(),
});
}
let editor_scene = EditorScene {
path: path.clone(),
root,
camera_controller,
physics: Physics::new(&scene),
navmeshes,
scene: engine.scenes.add(scene),
selection: Default::default(),
clipboard: Default::default(),
};
self.interaction_modes = vec![
InteractionMode::Select(SelectInteractionMode::new(
self.preview.frame,
self.preview.selection_frame,
self.message_sender.clone(),
)),
InteractionMode::Move(MoveInteractionMode::new(
&editor_scene,
engine,
self.message_sender.clone(),
)),
InteractionMode::Scale(ScaleInteractionMode::new(
&editor_scene,
engine,
self.message_sender.clone(),
)),
InteractionMode::Rotate(RotateInteractionMode::new(
&editor_scene,
engine,
self.message_sender.clone(),
)),
InteractionMode::Navmesh(EditNavmeshMode::new(
&editor_scene,
engine,
self.message_sender.clone(),
)),
InteractionMode::Terrain(TerrainInteractionMode::new(
&editor_scene,
engine,
self.message_sender.clone(),
self.sidebar.terrain_section.brush_section.brush.clone(),
)),
];
self.command_stack = CommandStack::new(false);
self.scene = Some(editor_scene);
self.set_interaction_mode(Some(InteractionModeKind::Move), engine);
self.sync_to_model(engine);
engine.user_interface.send_message(WindowMessage::title(
self.preview.window,
MessageDirection::ToWidget,
WindowTitle::Text(format!(
"Scene Preview - {}",
path.map_or("Unnamed Scene".to_string(), |p| p
.to_string_lossy()
.to_string())
)),
));
engine.renderer.flush();
}
fn set_interaction_mode(&mut self, mode: Option<InteractionModeKind>, engine: &mut GameEngine) {
if let Some(editor_scene) = self.scene.as_ref() {
if self.current_interaction_mode != mode {
// Deactivate current first.
if let Some(current_mode) = self.current_interaction_mode {
self.interaction_modes[current_mode as usize].deactivate(editor_scene, engine);
}
self.current_interaction_mode = mode;
}
}
}
pub fn handle_ui_message(&mut self, message: &UiMessage, engine: &mut GameEngine) {
scope_profile!();
// Prevent infinite message loops.
if message.has_flags(MSG_SYNC_FLAG) {
return;
}
self.configurator.handle_ui_message(message, engine);
self.menu.handle_ui_message(
message,
MenuContext {
engine,
editor_scene: self.scene.as_mut(),
sidebar_window: self.sidebar.window,
world_outliner_window: self.world_outliner.window,
asset_window: self.asset_browser.window,
configurator_window: self.configurator.window,
light_panel: self.light_panel.window,
log_panel: self.log.window,
settings: &mut self.settings,
path_fixer: self.path_fixer.window,
},
);
self.log.handle_ui_message(message, engine);
self.asset_browser.handle_ui_message(message, engine);
self.command_stack_viewer.handle_ui_message(message);
self.path_fixer
.handle_ui_message(message, &mut engine.user_interface);
if let Some(editor_scene) = self.scene.as_mut() {
self.navmesh_panel.handle_message(
message,
editor_scene,
engine,
if let InteractionMode::Navmesh(edit_mode) =
&mut self.interaction_modes[InteractionModeKind::Navmesh as usize]
{
edit_mode
} else {
unreachable!()
},
);
self.sound_panel
.handle_ui_message(&self.message_sender, editor_scene, message, engine);
self.sidebar
.handle_ui_message(message, editor_scene, engine);
self.world_outliner
.handle_ui_message(message, editor_scene, engine);
self.light_panel
.handle_ui_message(message, editor_scene, engine);
self.preview
.handle_ui_message(message, &engine.user_interface);
self.material_editor
.handle_ui_message(message, engine, &self.message_sender);
self.model_import_dialog.handle_ui_message(
message,
&engine.user_interface,
&self.message_sender,
);
let frame_size = engine
.user_interface
.node(self.preview.frame)
.screen_bounds()
.size;
if message.destination() == self.preview.frame {
if let UiMessageData::Widget(msg) = &message.data() {
match *msg {
WidgetMessage::MouseDown { button, pos, .. } => {
engine.user_interface.capture_mouse(self.preview.frame);
if button == MouseButton::Left {
if let Some(current_im) = self.current_interaction_mode {
let screen_bounds = engine
.user_interface
.node(self.preview.frame)
.screen_bounds();
let rel_pos = pos - screen_bounds.position;
self.preview.click_mouse_pos = Some(rel_pos);
self.interaction_modes[current_im as usize]
.on_left_mouse_button_down(
editor_scene,
engine,
rel_pos,
frame_size,
);
}
}
editor_scene.camera_controller.on_mouse_button_down(button);
}
WidgetMessage::MouseUp { button, pos, .. } => {
engine.user_interface.release_mouse_capture();
if button == MouseButton::Left {
self.preview.click_mouse_pos = None;
if let Some(current_im) = self.current_interaction_mode {
let screen_bounds = engine
.user_interface
.node(self.preview.frame)
.screen_bounds();
let rel_pos = pos - screen_bounds.position;
self.interaction_modes[current_im as usize]
.on_left_mouse_button_up(
editor_scene,
engine,
rel_pos,
frame_size,
);
}
}
editor_scene.camera_controller.on_mouse_button_up(button);
}
WidgetMessage::MouseWheel { amount, .. } => {
let graph = &mut engine.scenes[editor_scene.scene].graph;
editor_scene.camera_controller.on_mouse_wheel(amount, graph);
}
WidgetMessage::MouseMove { pos, .. } => {
let last_pos = *self.preview.last_mouse_pos.get_or_insert(pos);
let mouse_offset = pos - last_pos;
editor_scene.camera_controller.on_mouse_move(mouse_offset);
let screen_bounds = engine
.user_interface
.node(self.preview.frame)
.screen_bounds();
let rel_pos = pos - screen_bounds.position;
if let Some(current_im) = self.current_interaction_mode {
self.interaction_modes[current_im as usize].on_mouse_move(
mouse_offset,
rel_pos,
editor_scene.camera_controller.camera,
editor_scene,
engine,
frame_size,
&self.settings,
);
}
self.preview.last_mouse_pos = Some(pos);
}
WidgetMessage::KeyUp(key) => {
editor_scene.camera_controller.on_key_up(key);
if let Some(current_im) = self.current_interaction_mode {
self.interaction_modes[current_im as usize].on_key_up(
key,
editor_scene,
engine,
);
}
}
WidgetMessage::KeyDown(key) => {
editor_scene.camera_controller.on_key_down(key);
if let Some(current_im) = self.current_interaction_mode {
self.interaction_modes[current_im as usize].on_key_down(
key,
editor_scene,
engine,
);
}
match key {
KeyCode::Y => {
if engine.user_interface.keyboard_modifiers().control {
self.message_sender
.send(Message::RedoSceneCommand)
.unwrap();
}
}
KeyCode::Z => {
if engine.user_interface.keyboard_modifiers().control {
self.message_sender
.send(Message::UndoSceneCommand)
.unwrap();
}
}
KeyCode::Key1 => self.set_interaction_mode(
Some(InteractionModeKind::Select),
engine,
),
KeyCode::Key2 => self
.set_interaction_mode(Some(InteractionModeKind::Move), engine),
KeyCode::Key3 => self.set_interaction_mode(
Some(InteractionModeKind::Rotate),
engine,
),
KeyCode::Key4 => self
.set_interaction_mode(Some(InteractionModeKind::Scale), engine),
KeyCode::L
if engine.user_interface.keyboard_modifiers().control =>
{
self.menu
.open_load_file_selector(&mut engine.user_interface);
}
KeyCode::C
if engine.user_interface.keyboard_modifiers().control =>
{
if let Selection::Graph(graph_selection) =
&editor_scene.selection
{
editor_scene.clipboard.fill_from_selection(
graph_selection,
editor_scene.scene,
&editor_scene.physics,
engine,
);
}
}
KeyCode::V
if engine.user_interface.keyboard_modifiers().control =>
{
if !editor_scene.clipboard.is_empty() {
self.message_sender
.send(Message::DoSceneCommand(SceneCommand::Paste(
PasteCommand::new(),
)))
.unwrap();
}
}
KeyCode::Delete => {
if !editor_scene.selection.is_empty() {
match editor_scene.selection {
Selection::Graph(_) => {
self.message_sender
.send(Message::DoSceneCommand(
make_delete_selection_command(
editor_scene,
engine,
),
))
.unwrap();
}
Selection::Sound(ref selection) => {
let mut commands = selection
.sources()
.iter()
.map(|&source| {
SceneCommand::DeleteSoundSource(
DeleteSoundSourceCommand::new(source),
)
})
.collect::<Vec<_>>();
commands.insert(
0,
SceneCommand::ChangeSelection(
ChangeSelectionCommand::new(
Selection::None,
editor_scene.selection.clone(),
),
),
);
self.message_sender
.send(Message::DoSceneCommand(
SceneCommand::CommandGroup(
CommandGroup::from(commands),
),
))
.unwrap();
}
_ => (),
}
}
}
_ => (),
}
}
WidgetMessage::Drop(handle) => {
if handle.is_some() {
if let UiNode::User(EditorUiNode::AssetItem(item)) =
engine.user_interface.node(handle)
{
// Make sure all resources are loaded with relative paths only.
// This keeps scenes portable.
let relative_path = make_relative_path(&item.path);
match item.kind {
AssetKind::Model => {
self.model_import_dialog
.open(relative_path, &engine.user_interface);
}
AssetKind::Texture => {
let cursor_pos =
engine.user_interface.cursor_position();
let screen_bounds = engine
.user_interface
.node(self.preview.frame)
.screen_bounds();
let rel_pos = cursor_pos - screen_bounds.position;
let graph = &engine.scenes[editor_scene.scene].graph;
if let Some(result) =
editor_scene.camera_controller.pick(
rel_pos,
graph,
editor_scene.root,
frame_size,
false,
|_, _| true,
)
{
let tex = engine
.resource_manager
.request_texture(&relative_path, None);
let texture = tex.clone();
let texture = texture.state();
if let TextureState::Ok(_) = *texture {
match &mut engine.scenes[editor_scene.scene]
.graph[result.node]
{
Node::Mesh(_) => {
self.message_sender
.send(Message::DoSceneCommand(
SceneCommand::SetMeshTexture(
SetMeshTextureCommand::new(
result.node,
tex,
),
),
))
.unwrap();
}
Node::Sprite(_) => {
self.message_sender
.send(Message::DoSceneCommand(
SceneCommand::SetSpriteTexture(
SetSpriteTextureCommand::new(
result.node, Some(tex),
),
),
))
.unwrap();
}
Node::ParticleSystem(_) => {
self.message_sender
.send(Message::DoSceneCommand(
SceneCommand::SetParticleSystemTexture(
SetParticleSystemTextureCommand::new(
result.node, Some(tex),
),
),
))
.unwrap();
}
_ => {}
}
}
}
}
_ => {}
}
}
}
}
_ => {}
}
}
}
match message.data() {
UiMessageData::MessageBox(MessageBoxMessage::Close(result))
if message.destination() == self.exit_message_box =>
{
match result {
MessageBoxResult::No => {
self.message_sender
.send(Message::Exit { force: true })
.unwrap();
}
MessageBoxResult::Yes => {
if let Some(scene) = self.scene.as_ref() {
if let Some(path) = scene.path.as_ref() {
self.message_sender
.send(Message::SaveScene(path.clone()))
.unwrap();
self.message_sender
.send(Message::Exit { force: true })
.unwrap();
} else {
// Scene wasn't saved yet, open Save As dialog.
engine
.user_interface
.send_message(WindowMessage::open_modal(
self.save_file_selector,
MessageDirection::ToWidget,
true,
));
}
}
}
_ => {}
}
}
UiMessageData::FileSelector(FileSelectorMessage::Commit(path))
if message.destination() == self.save_file_selector =>
{
self.message_sender
.send(Message::SaveScene(path.clone()))
.unwrap();
self.message_sender
.send(Message::Exit { force: true })
.unwrap();
}
_ => (),
}
}
}
fn sync_to_model(&mut self, engine: &mut GameEngine) {
scope_profile!();
self.menu
.sync_to_model(self.scene.as_ref(), &mut engine.user_interface);
if let Some(editor_scene) = self.scene.as_mut() {
self.world_outliner.sync_to_model(editor_scene, engine);
self.sidebar.sync_to_model(editor_scene, engine);
self.navmesh_panel.sync_to_model(editor_scene, engine);
self.sound_panel.sync_to_model(editor_scene, engine);
self.material_editor
.sync_to_model(&mut engine.user_interface);
self.command_stack_viewer.sync_to_model(
&mut self.command_stack,
&SceneContext {
scene: &mut engine.scenes[editor_scene.scene],
message_sender: self.message_sender.clone(),
editor_scene,
resource_manager: engine.resource_manager.clone(),
},
&mut engine.user_interface,
)
} else {
self.world_outliner.clear(&mut engine.user_interface);
}
}
fn post_update(&mut self, engine: &mut GameEngine) {
if let Some(scene) = self.scene.as_mut() {
self.world_outliner.post_update(scene, engine);
}
}
fn update(&mut self, engine: &mut GameEngine, dt: f32) {
scope_profile!();
let mut needs_sync = false;
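// Drain the editor's message queue; scene-command messages go through the
// command stack and mark the UI for a re-sync once the queue is empty.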
while let Ok(message) = self.message_receiver.try_recv() {
self.log.handle_message(&message, engine);
self.path_fixer
.handle_message(&message, &mut engine.user_interface);
match message {
Message::DoSceneCommand(command) => {
if let Some(editor_scene) = self.scene.as_mut() {
self.command_stack.do_command(
command,
SceneContext {
scene: &mut engine.scenes[editor_scene.scene],
message_sender: self.message_sender.clone(),
editor_scene,
resource_manager: engine.resource_manager.clone(),
},
);
needs_sync = true;
}
}
Message::UndoSceneCommand => {
if let Some(editor_scene) = self.scene.as_mut() {
self.command_stack.undo(SceneContext {
scene: &mut engine.scenes[editor_scene.scene],
message_sender: self.message_sender.clone(),
editor_scene,
resource_manager: engine.resource_manager.clone(),
});
needs_sync = true;
}
}
Message::RedoSceneCommand => {
if let Some(editor_scene) = self.scene.as_mut() {
self.command_stack.redo(SceneContext {
scene: &mut engine.scenes[editor_scene.scene],
message_sender: self.message_sender.clone(),
editor_scene,
resource_manager: engine.resource_manager.clone(),
});
needs_sync = true;
}
}
Message::ClearSceneCommandStack => {
if let Some(editor_scene) = self.scene.as_mut() {
self.command_stack.clear(SceneContext {
scene: &mut engine.scenes[editor_scene.scene],
message_sender: self.message_sender.clone(),
editor_scene,
resource_manager: engine.resource_manager.clone(),
});
needs_sync = true;
}
}
Message::SelectionChanged => {
self.world_outliner.sync_selection = true;
}
Message::SyncToModel => {
needs_sync = true;
}
Message::SaveScene(path) => {
if let Some(editor_scene) = self.scene.as_mut() {
match editor_scene.save(path.clone(), engine) {
Ok(message) => {
engine.user_interface.send_message(WindowMessage::title(
self.preview.window,
MessageDirection::ToWidget,
WindowTitle::Text(format!(
"Scene Preview - {}",
path.display()
)),
));
self.message_sender.send(Message::Log(message)).unwrap();
}
Err(message) => {
self.message_sender
.send(Message::Log(message.clone()))
.unwrap();
engine.user_interface.send_message(MessageBoxMessage::open(
self.validation_message_box,
MessageDirection::ToWidget,
None,
Some(message),
));
}
}
}
}
Message::LoadScene(scene_path) => {
let result = {
rg3d::core::futures::executor::block_on(Scene::from_file(
&scene_path,
engine.resource_manager.clone(),
&MaterialSearchOptions::UsePathDirectly,
))
};
match result {
Ok(scene) => {
self.set_scene(engine, scene, Some(scene_path));
}
Err(e) => {
self.message_sender
.send(Message::Log(e.to_string()))
.unwrap();
}
}
}
Message::SetInteractionMode(mode_kind) => {
self.set_interaction_mode(Some(mode_kind), engine);
}
Message::Exit { force } => {
if force {
self.exit = true;
} else if self.scene.is_some() {
engine.user_interface.send_message(MessageBoxMessage::open(
self.exit_message_box,
MessageDirection::ToWidget,
None,
None,
));
} else {
self.exit = true;
}
}
Message::Log(msg) => {
println!("{}", msg);
}
Message::CloseScene => {
if let Some(editor_scene) = self.scene.take() {
engine.scenes.remove(editor_scene.scene);
needs_sync = true;
// The preview frame has the scene's render-target texture assigned; it must be
// cleared explicitly, otherwise the preview keeps showing the last rendered frame.
engine.user_interface.send_message(ImageMessage::texture(
self.preview.frame,
MessageDirection::ToWidget,
None,
));
}
}
Message::NewScene => {
let mut scene = Scene::new();
scene.ambient_lighting_color = Color::opaque(200, 200, 200);
self.set_scene(engine, scene, None);
}
Message::Configure { working_directory } => {
assert!(self.scene.is_none());
self.asset_browser.clear_preview(engine);
std::env::set_current_dir(working_directory.clone()).unwrap();
engine.get_window().set_title(&format!(
"rusty-editor: {}",
working_directory.to_string_lossy().to_string()
));
engine.resource_manager.state().destroy_unused_resources();
engine.renderer.flush();
self.asset_browser
.set_working_directory(engine, &working_directory);
self.model_import_dialog
.set_working_directory(engine, &working_directory);
self.message_sender
.send(Message::Log(format!(
"New working directory was successfully set: {:?}",
working_directory
)))
.unwrap();
needs_sync = true;
}
Message::OpenSettings(section) => {
self.menu
.settings
.open(&engine.user_interface, &self.settings, Some(section));
}
Message::OpenMaterialEditor(material) => {
self.material_editor.set_material(Some(material), engine);
engine.user_interface.send_message(WindowMessage::open(
self.material_editor.window,
MessageDirection::ToWidget,
true,
));
}
Message::ShowInAssetBrowser(path) => {
self.asset_browser.locate_path(&engine.user_interface, path);
}
}
}
if needs_sync {
self.sync_to_model(engine);
}
if let Some(editor_scene) = self.scene.as_mut() {
// Adjust camera viewport to size of frame.
let scene = &mut engine.scenes[editor_scene.scene];
scene.drawing_context.clear_lines();
let camera = scene.graph[editor_scene.camera_controller.camera].as_camera_mut();
camera.set_z_near(self.settings.graphics.z_near);
camera.set_z_far(self.settings.graphics.z_far);
// Create new render target if preview frame has changed its size.
let (rt_width, rt_height) = if let TextureKind::Rectangle { width, height } =
scene.render_target.clone().unwrap().data_ref().kind()
{
(width, height)
} else {
unreachable!();
};
if let UiNode::Image(frame) = engine.user_interface.node(self.preview.frame) {
let frame_size = frame.actual_size();
if rt_width != frame_size.x as u32 || rt_height != frame_size.y as u32 {
let rt = Texture::new_render_target(frame_size.x as u32, frame_size.y as u32);
scene.render_target = Some(rt.clone());
engine.user_interface.send_message(ImageMessage::texture(
self.preview.frame,
MessageDirection::ToWidget,
Some(into_gui_texture(rt)),
));
}
}
if let Selection::Graph(selection) = &editor_scene.selection {
for &node in selection.nodes() {
let node = &scene.graph[node];
let aabb = match node {
Node::Base(_) => AxisAlignedBoundingBox::unit(),
Node::Light(_) => AxisAlignedBoundingBox::unit(),
Node::Camera(_) => AxisAlignedBoundingBox::unit(),
Node::Mesh(ref mesh) => mesh.bounding_box(),
Node::Sprite(_) => AxisAlignedBoundingBox::unit(),
Node::ParticleSystem(_) => AxisAlignedBoundingBox::unit(),
Node::Terrain(ref terrain) => terrain.bounding_box(),
Node::Decal(_) => AxisAlignedBoundingBox::unit(),
};
scene
.drawing_context
.draw_oob(&aabb, node.global_transform(), Color::GREEN);
}
}
fn draw_recursively(
node: Handle<Node>,
graph: &Graph,
ctx: &mut SceneDrawingContext,
editor_scene: &EditorScene,
show_tbn: bool,
show_bounds: bool,
) {
// Ignore editor nodes.
if node == editor_scene.root {
return;
}
let node = &graph[node];
match node {
Node::Base(_) => {
if show_bounds {
ctx.draw_oob(
&AxisAlignedBoundingBox::unit(),
node.global_transform(),
Color::opaque(255, 127, 39),
);
}
}
Node::Mesh(mesh) => {
if show_tbn {
// TODO: Add switch to settings to turn this on/off
let transform = node.global_transform();
for surface in mesh.surfaces() {
for vertex in surface.data().read().unwrap().vertex_buffer.iter() {
let len = 0.025;
let position = transform
.transform_point(&Point3::from(
vertex
.read_3_f32(VertexAttributeUsage::Position)
.unwrap(),
))
.coords;
let vertex_tangent =
vertex.read_4_f32(VertexAttributeUsage::Tangent).unwrap();
let tangent = transform
.transform_vector(&vertex_tangent.xyz())
.normalize()
.scale(len);
let normal = transform
.transform_vector(
&vertex
.read_3_f32(VertexAttributeUsage::Normal)
.unwrap()
.xyz(),
)
.normalize()
.scale(len);
let binormal = tangent
.xyz()
.cross(&normal)
.scale(vertex_tangent.w)
.normalize()
.scale(len);
ctx.add_line(Line {
begin: position,
end: position + tangent,
color: Color::RED,
});
ctx.add_line(Line {
begin: position,
end: position + normal,
color: Color::BLUE,
});
ctx.add_line(Line {
begin: position,
end: position + binormal,
color: Color::GREEN,
});
}
}
}
}
_ => {}
}
for &child in node.children() {
draw_recursively(child, graph, ctx, editor_scene, show_tbn, show_bounds)
}
}
// Draw pivots.
draw_recursively(
scene.graph.get_root(),
&scene.graph,
&mut scene.drawing_context,
editor_scene,
self.settings.debugging.show_tbn,
self.settings.debugging.show_bounds,
);
if self.settings.debugging.show_physics {
editor_scene
.physics
.draw(&mut scene.drawing_context, &scene.graph);
}
let graph = &mut scene.graph;
editor_scene.camera_controller.update(graph, dt);
if let Some(mode) = self.current_interaction_mode {
self.interaction_modes[mode as usize].update(
editor_scene,
editor_scene.camera_controller.camera,
engine,
);
}
self.asset_browser.update(engine);
self.material_editor.update(engine);
}
}
}
fn poll_ui_messages(editor: &mut Editor, engine: &mut GameEngine) {
scope_profile!();
while let Some(ui_message) = engine.user_interface.poll_message() {
editor.handle_ui_message(&ui_message, engine);
}
}
fn update(
editor: &mut Editor,
engine: &mut GameEngine,
elapsed_time: &mut f32,
fixed_timestep: f32,
clock: &Instant,
) {
scope_profile!();
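// Fixed-timestep loop: consume accumulated wall-clock time in fixed steps,
// bailing out early if the editor falls too far behind real time.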
let mut dt = clock.elapsed().as_secs_f32() - *elapsed_time;
while dt >= fixed_timestep {
dt -= fixed_timestep;
*elapsed_time += fixed_timestep;
engine.update(fixed_timestep);
editor.update(engine, fixed_timestep);
poll_ui_messages(editor, engine);
editor.post_update(engine);
if dt >= 1.5 * fixed_timestep {
break;
}
}
let window = engine.get_window();
window.set_cursor_icon(translate_cursor_icon(engine.user_interface.cursor()));
window.request_redraw();
}
fn main() {
let event_loop = EventLoop::new();
let inner_size = if let Some(primary_monitor) = event_loop.primary_monitor() {
let mut monitor_dimensions = primary_monitor.size();
monitor_dimensions.height = (monitor_dimensions.height as f32 * 0.7) as u32;
monitor_dimensions.width = (monitor_dimensions.width as f32 * 0.7) as u32;
monitor_dimensions.to_logical::<f32>(primary_monitor.scale_factor())
} else {
LogicalSize::new(1024.0, 768.0)
};
let window_builder = rg3d::window::WindowBuilder::new()
.with_inner_size(inner_size)
.with_title("rusty editor")
.with_resizable(true);
let mut engine = GameEngine::new(window_builder, &event_loop, true).unwrap();
engine.resource_manager.state().set_textures_import_options(
TextureImportOptions::default().with_compression(CompressionOptions::NoCompression),
);
let overlay_pass = OverlayRenderPass::new(engine.renderer.pipeline_state());
engine.renderer.add_render_pass(overlay_pass);
let mut editor = Editor::new(&mut engine);
let clock = Instant::now();
let fixed_timestep = 1.0 / 60.0;
let mut elapsed_time = 0.0;
event_loop.run(move |event, _, control_flow| match event {
Event::MainEventsCleared => {
update(
&mut editor,
&mut engine,
&mut elapsed_time,
fixed_timestep,
&clock,
);
if editor.exit {
*control_flow = ControlFlow::Exit;
}
}
Event::RedrawRequested(_) => {
engine.render().unwrap();
}
Event::WindowEvent { event, .. } => {
match event {
WindowEvent::CloseRequested => {
editor
.message_sender
.send(Message::Exit { force: false })
.unwrap();
}
WindowEvent::Resized(size) => {
if let Err(e) = engine.renderer.set_frame_size(size.into()) {
rg3d::utils::log::Log::writeln(
MessageKind::Error,
format!("Failed to set renderer size! Reason: {:?}", e),
);
}
engine.user_interface.send_message(WidgetMessage::width(
editor.root_grid,
MessageDirection::ToWidget,
size.width as f32,
));
engine.user_interface.send_message(WidgetMessage::height(
editor.root_grid,
MessageDirection::ToWidget,
size.height as f32,
));
}
_ => (),
}
if let Some(os_event) = translate_event(&event) {
engine.user_interface.process_os_event(&os_event);
}
}
Event::LoopDestroyed => {
if let Ok(profiling_results) = rg3d::core::profiler::print() {
if let Ok(mut file) =
fs::File::create(project_dirs::working_data_dir("profiling.log"))
{
let _ = writeln!(file, "{}", profiling_results);
}
}
}
_ => *control_flow = ControlFlow::Poll,
});
}
| make_color_material |
tervela.py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' ESP Tervela Connector '''
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import re
import six
from .base import Connector, prop, map_properties
from ..utils import xml
from ..utils.data import gen_name
class TervelaSubscriber(Connector):
'''
Subscribe to Tervela Data Fabric events
Parameters
----------
tvaprimarytmx : string
Specifies the host name or IP address of the primary TMX
tvauserid : string
Specifies a user name defined in the Tervela TPM.
Publish-topic entitlement rights must be associated with
this user name.
tvapassword : string
Specifies the password associated with tvauserid
tvatopic : string
Specifies the name of the topic to which to subscribe.
This topic must be configured on the TPM for the GD service and
tvauserid must be assigned the Guaranteed Delivery subscribe
rights for this Topic in the TPM.
tvaclientname : string
Specifies the client name associated with the Tervela
Guaranteed Delivery context.
tvamaxoutstand : int
Specifies the maximum number of unacknowledged messages that
can be published to the Tervela fabric (effectively the size of
the publication cache). Should be twice the expected transmit rate.
numbufferedmsgs : int
Specifies the maximum number of messages buffered by a standby
subscriber connector.
urlhostport : string
Specifies the "host/port" string sent in the metadata message
published by the connector on topic SAS.META.tvaclientname when
it starts.
snapshot : boolean, optional
Specifies whether to send snapshot data
collapse : string, optional
Enables conversion of UPDATE_BLOCK events to make subscriber
output publishable. The default value is disabled.
hotfailover : boolean, optional
Enables hot failover mode
tvasecondarytmx : string, optional
Specifies the host name or IP address of the secondary TMX.
Required if logging in to a fault-tolerant pair.
tvalogfile : string, optional
Causes the connector to log to the specified file instead of to
syslog (on Linux or Solaris) or Tervela.log (on Windows)
tvapubbwlimit : int, optional
Specifies the maximum bandwidth, in Mbps, of data published to
the fabric. The default value is 100 Mbps.
tvapubrate : int, optional
Specifies the rate at which data messages are published to the
fabric, in Kbps. The default value is 30,000 messages per second.
tvapubmsgexp : int, optional
Specifies the maximum amount of time, in seconds, that published
messages are kept in the cache in the Tervela API.
rmretdel : boolean, optional
Specifies to remove all delete events from event blocks received
by a subscriber that were introduced by a window retention policy.
configfilesection : string, optional
Specifies the name of the section in the connector config file to
parse for configuration parameters. Specify the value
as [configfilesection].
protofile : string, optional
Specifies the .proto file that contains the Google Protocol Buffers
message definition. This definition is used to convert event blocks
to protobuf messages. When you specify this parameter, you must
also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter. Event
blocks are converted into this message.
json : boolean, optional
Enables transport of event blocks encoded as JSON messages
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields in
CSV events. The default behavior is these fields are interpreted
as an integer number of seconds (ESP_DATETIME) or microseconds
(ESP_TIMESTAMP) since epoch.
tvapasswordencrypted : boolean, optional
Specifies that tvapassword is encrypted
Returns
-------
:class:`TervelaSubscriber`
'''
connector_key = dict(cls='tervela', type='subscribe')
property_defs = dict(
tvaprimarytmx=prop('tvaprimarytmx', dtype='string', required=True),
tvauserid=prop('tvauserid', dtype='string', required=True),
tvapassword=prop('tvapassword', dtype='string', required=True), | tvamaxoutstand=prop('tvamaxoutstand', dtype='int', required=True),
numbufferedmsgs=prop('numbufferedmsgs', dtype='int', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
snapshot=prop('snapshot', dtype='string', required=True, default=False),
collapse=prop('collapse', dtype='string'),
hotfailover=prop('hotfailover', dtype='boolean'),
tvasecondarytmx=prop('tvasecondarytmx', dtype='string'),
tvalogfile=prop('tvalogfile', dtype='string'),
tvapubbwlimit=prop('tvapubbwlimit', dtype='int'),
tvapubrate=prop('tvapubrate', dtype='int'),
tvapubmsgexp=prop('tvapubmsgexp', dtype='int'),
rmretdel=prop('rmretdel', dtype='boolean'),
configfilesection=prop('configfilesection', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
json=prop('json', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
tvapasswordencrypted=prop('tvapasswordencrypted', dtype='boolean')
)
def __init__(self, tvaprimarytmx=None, tvauserid=None, tvapassword=None,
tvatopic=None, tvaclientname=None, tvamaxoutstand=None,
numbufferedmsgs=None, urlhostport=None,
name=None, is_active=None, snapshot=None,
collapse=None, hotfailover=None, tvasecondarytmx=None,
tvalogfile=None, tvapubbwlimit=None, tvapubrate=None,
tvapubmsgexp=None, rmretdel=None, configfilesection=None,
protofile=None, protomsg=None, json=None,
dateformat=None, tvapasswordencrypted=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tervela', name=name, type='subscribe',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tvaprimarytmx',
'tvauserid',
'tvapassword',
'tvatopic',
'tvaclientname',
'tvamaxoutstand',
'numbufferedmsgs',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
req[7], name=name, is_active=is_active, **properties)
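# Minimal usage sketch (host names, credentials, topic, and sizes below are
# hypothetical placeholders, not defaults supplied by this module):
#
#   sub = TervelaSubscriber(
#       tvaprimarytmx='tmx1.example.com',
#       tvauserid='espuser',
#       tvapassword='secret',
#       tvatopic='SAS.ESP.OUT',
#       tvaclientname='esp-subscriber',
#       tvamaxoutstand=10000,
#       numbufferedmsgs=1000,
#       urlhostport='esp-host:55555',
#   )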
class TervelaPublisher(Connector):
'''
Publish Tervela Data Fabric events
Parameters
----------
tvaprimarytmx : string
Specifies the host name or IP address of the primary TMX
tvauserid : string
Specifies a user name defined in the Tervela TPM. Subscribe-topic
entitlement rights must be associated with this user name.
tvapassword : string
Specifies the password associated with tvauserid
tvatopic : string
Specifies the topic name for the topic to which to publish. This
topic must be configured on the TPM for the GD service.
tvaclientname : string
Specifies the client name associated with the Tervela Guaranteed
Delivery context. Must be unique among all instances of
Tervela connectors.
tvasubname : string
Specifies the name assigned to the Guaranteed Delivery subscription
being created. The combination of this name and tvaclientname
are used by the fabric to replay the last subscription state
urlhostport : string
Specifies the "host:port" string sent in the metadata message
published by the connector on topic SAS.META.tvaclientname when
it starts.
tvasecondarytmx : string, optional
Specifies the host name or IP address of the secondary TMX.
Required when logging in to a fault-tolerant pair.
tvalogfile : string, optional
Causes the connector to log to the specified file instead of to
syslog (on Linux or Solaris) or Tervela.log (on Windows)
configfilesection : string, optional
Specifies the name of the section in the connector config file
to parse for configuration parameters. Specify the value
as [configfilesection].
protofile : string, optional
Specifies the .proto file that contains the Google Protocol
Buffers message definition. This definition is used to convert
event blocks to protobuf messages. When you specify this
parameter, you must also specify the protomsg parameter.
protomsg : string, optional
Specifies the name of a Google Protocol Buffers message in the
.proto file that you specified with the protofile parameter.
Event blocks are converted into this message.
json : boolean, optional
Enables transport of event blocks encoded as JSON messages.
publishwithupsert : boolean, optional
Specifies to build events with opcode = Upsert instead of
opcode = Insert.
dateformat : string, optional
Specifies the format of ESP_DATETIME and ESP_TIMESTAMP fields
in CSV events. The default behavior is these fields are
interpreted as an integer number of seconds (ESP_DATETIME)
or microseconds (ESP_TIMESTAMP) since epoch.
tvapasswordencrypted : boolean, optional
Specifies that tvapassword is encrypted
maxevents : int, optional
Specifies the maximum number of events to publish.
Returns
-------
:class:`TervelaPublisher`
'''
connector_key = dict(cls='tva', type='publish')
property_defs = dict(
tvaprimarytmx=prop('tvaprimarytmx', dtype='string', required=True),
tvauserid=prop('tvauserid', dtype='string', required=True),
tvapassword=prop('tvapassword', dtype='string', required=True),
tvatopic=prop('tvatopic', dtype='string', required=True),
tvaclientname=prop('tvaclientname', dtype='string', required=True),
tvasubname=prop('tvasubname', dtype='string', required=True),
urlhostport=prop('urlhostport', dtype='string', required=True),
tvasecondarytmx=prop('tvasecondarytmx', dtype='string'),
tvalogfile=prop('tvalogfile', dtype='string'),
configfilesection=prop('configfilesection', dtype='string'),
protofile=prop('protofile', dtype='string'),
protomsg=prop('protomsg', dtype='string'),
json=prop('json', dtype='boolean'),
publishwithupsert=prop('publishwithupsert', dtype='boolean'),
dateformat=prop('dateformat', dtype='string'),
tvapasswordencrypted=prop('tvapasswordencrypted', dtype='boolean'),
maxevents=prop('maxevents', dtype='int')
)
def __init__(self, tvaprimarytmx=None, tvauserid=None,
tvapassword=None, tvatopic=None,
tvaclientname=None, tvasubname=None, urlhostport=None,
name=None, is_active=None,
tvasecondarytmx=None, tvalogfile=None,
configfilesection=None, protofile=None, protomsg=None,
json=None, publishwithupsert=None, dateformat=None,
tvapasswordencrypted=None, maxevents=None):
params = dict(**locals())
params.pop('is_active')
params.pop('self')
name = params.pop('name')
Connector.__init__(self, 'tva', name=name, type='publish',
is_active=is_active, properties=params)
@classmethod
def from_parameters(cls, conncls, type=None, name=None, is_active=None,
properties=None):
req, properties = map_properties(cls, properties,
required=['tvaprimarytmx',
'tvauserid',
'tvapassword',
'tvatopic',
'tvaclientname',
'tvasubname',
'urlhostport'],
delete='type')
return cls(req[0], req[1], req[2], req[3], req[4], req[5], req[6],
name=name, is_active=is_active, **properties) | tvatopic=prop('tvatopic', dtype='string', required=True),
tvaclientname=prop('tvaclientname', dtype='string', required=True), |
SetTopicAttributesCommand.ts | import * as __aws_sdk_middleware_stack from "@aws-sdk/middleware-stack";
import * as __aws_sdk_types from "@aws-sdk/types";
import { SetTopicAttributes } from "../model/operations/SetTopicAttributes";
import { InputTypesUnion } from "../types/InputTypesUnion";
import { OutputTypesUnion } from "../types/OutputTypesUnion";
import { SetTopicAttributesInput } from "../types/SetTopicAttributesInput";
import { SetTopicAttributesOutput } from "../types/SetTopicAttributesOutput";
import { SNSResolvedConfiguration } from "../SNSConfiguration";
export * from "../types/SetTopicAttributesInput";
export * from "../types/SetTopicAttributesOutput";
export * from "../types/SetTopicAttributesExceptionsUnion";
export class |
implements
__aws_sdk_types.Command<
InputTypesUnion,
SetTopicAttributesInput,
OutputTypesUnion,
SetTopicAttributesOutput,
SNSResolvedConfiguration,
Blob
> {
readonly model = SetTopicAttributes;
readonly middlewareStack = new __aws_sdk_middleware_stack.MiddlewareStack<
SetTopicAttributesInput,
SetTopicAttributesOutput,
Blob
>();
constructor(readonly input: SetTopicAttributesInput) {}
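// Merges the client-level middleware stack with this command's own stack and
// resolves it into a single handler for the SetTopicAttributes operation.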
resolveMiddleware(
clientStack: __aws_sdk_middleware_stack.MiddlewareStack<
InputTypesUnion,
OutputTypesUnion,
Blob
>,
configuration: SNSResolvedConfiguration
): __aws_sdk_types.Handler<
SetTopicAttributesInput,
SetTopicAttributesOutput
> {
const { handler } = configuration;
const stack = clientStack.concat(this.middlewareStack);
const handlerExecutionContext: __aws_sdk_types.HandlerExecutionContext = {
logger: {} as any,
model: this.model
};
return stack.resolve(
handler<SetTopicAttributesInput, SetTopicAttributesOutput>(
handlerExecutionContext
),
handlerExecutionContext
);
}
}
| SetTopicAttributesCommand |
test_Signing.py | from mqfactory import Message
from mqfactory.message.security import Signing, Signature
from mqfactory.tools import Policy, Rule
def test_signing_setup(mq, transport, signature):
Signing( mq, adding=signature )
mq.before_sending.append.assert_called_with(signature.sign)
mq.before_handling.append.assert_called_with(signature.validate)
class MockSignature(Signature):
def | (self, message):
return True
def _validate(self, message):
return True
def test_signing_policy():
signature = MockSignature(Policy([
Rule({"to": "unencrypted"}, False)
]))
encrypted = Message("encrypted", "message")
not_encrypted = Message("unencrypted", "message")
assert signature.sign(encrypted)
assert signature.sign(not_encrypted) == False
assert signature.validate(encrypted)
assert signature.validate(not_encrypted) == False
| _sign |
app.js | /*
|--------------------------------------------------------------------------
| Providers
|--------------------------------------------------------------------------
|
| Providers are building blocks for your Adonis app. Anytime you install
| a new Adonis specific package, chances are you will register the
| provider here.
|
*/
const providers = [
'@adonisjs/framework/providers/AppProvider',
'@adonisjs/framework/providers/ViewProvider',
'@adonisjs/lucid/providers/LucidProvider',
'@adonisjs/bodyparser/providers/BodyParserProvider',
'@adonisjs/cors/providers/CorsProvider',
'@adonisjs/shield/providers/ShieldProvider',
'@adonisjs/session/providers/SessionProvider',
'@adonisjs/auth/providers/AuthProvider',
'@adonisjs/validator/providers/ValidatorProvider',
'@adonisjs/mail/providers/MailProvider',
'@adonisjs/ally/providers/AllyProvider'
]
/*
|--------------------------------------------------------------------------
| Ace Providers
|--------------------------------------------------------------------------
|
| Ace providers are required only when running ace commands. For example
| Providers for migrations, tests etc.
|
*/
const aceProviders = [
'@adonisjs/lucid/providers/MigrationsProvider'
]
/*
|--------------------------------------------------------------------------
| Aliases
|--------------------------------------------------------------------------
|
| Aliases are short unique names for IoC container bindings. You are free
| to create your own aliases.
|
| For example:
| { Route: 'Adonis/Src/Route' }
|
*/
const aliases = {}
/*
|--------------------------------------------------------------------------
| Commands
|--------------------------------------------------------------------------
|
| Here you store ace commands for your package
|
*/
const commands = [
'App/Commands/PullPrice'
] | module.exports = {
providers, aceProviders, aliases, commands
} | |
filesystem_test.go | // +build !windows
package spec
import "testing"
func TestFilesystemGenerator(t *testing.T) {
g := &FilesystemGenerator{}
if g.Key() != "filesystem" { | }
}
func TestFilesystemGenerate(t *testing.T) {
g := &FilesystemGenerator{}
result, err := g.Generate()
if err != nil {
t.Skipf("Generate() failed: %s", err)
}
_, resultTypeOk := result.(map[string]map[string]interface{})
if !resultTypeOk {
t.Errorf("Return type of Generate() shuold be map[string]map[string]interface{}")
}
} | t.Error("key should be 'filesystem'") |
vizRadialArcs.js | /*******************************************************************************
* arcs coming out from a circle
*/
function VizRadialArcs(variant) {
this.dampen = true;
this.hasVariants = true;
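// Each variant is a [gap, fade] pair: gap leaves space between adjacent arcs,
// fade maps the band's spectrum value to arc brightness.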
this.variants = [[false, true], [true, false], [false, false]];
this.vary(variant);
}
VizRadialArcs.prototype.vary = function(variant) {
this.variant = variant;
this.gap = this.variants[variant][0];
this.fade = this.variants[variant][1];
}
VizRadialArcs.prototype.resize = function() {}
VizRadialArcs.prototype.draw = function(spectrum) {
ctx.save();
ctx.clearRect(0, 0, cv.width, cv.height)
ctx.translate(cv.width / 2, cv.height / 2);
ctx.rotate(allRotate); | var hue = Math.floor(360.0 / bandCount * i);
var brightness = 99;
if (this.fade) {
var brightness = constrain(Math.floor(spectrum[i] / 1.5), 25, 99);
}
ctx.fillStyle = bigColorMap[hue * 100 + brightness];
ctx.beginPath();
if (this.gap) {
ctx.arc(0, 0, centerRadius + Math.max(spectrum[i] * heightMultiplier, 2),
0, rotateAmount / 2);
} else {
ctx.arc(0, 0, centerRadius + Math.max(spectrum[i] * heightMultiplier, 2),
0, rotateAmount + 0.005);
}
ctx.lineTo(0, 0);
ctx.fill();
ctx.closePath();
}
ctx.fillStyle = '#000000';
ctx.beginPath();
ctx.arc(0, 0, centerRadius, 0, 2 * Math.PI, false);
ctx.fill();
ctx.closePath();
allRotate += 0.002;
ctx.restore();
} | for (var i = 0; i < bandCount; i++) {
ctx.rotate(rotateAmount); |
remove_test.go | package config
import (
"io/ioutil"
"strings"
"testing"
"github.com/docker/cli/internal/test"
"github.com/docker/cli/internal/test/testutil"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
)
func TestConfigRemoveErrors(t *testing.T) {
testCases := []struct {
args []string
configRemoveFunc func(string) error
expectedError string
}{
{
args: []string{},
expectedError: "requires at least 1 argument.",
},
{
args: []string{"foo"},
configRemoveFunc: func(name string) error {
return errors.Errorf("error removing config") | }
for _, tc := range testCases {
cmd := newConfigRemoveCommand(
test.NewFakeCli(&fakeClient{
configRemoveFunc: tc.configRemoveFunc,
}),
)
cmd.SetArgs(tc.args)
cmd.SetOutput(ioutil.Discard)
testutil.ErrorContains(t, cmd.Execute(), tc.expectedError)
}
}
func TestConfigRemoveWithName(t *testing.T) {
names := []string{"foo", "bar"}
var removedConfigs []string
cli := test.NewFakeCli(&fakeClient{
configRemoveFunc: func(name string) error {
removedConfigs = append(removedConfigs, name)
return nil
},
})
cmd := newConfigRemoveCommand(cli)
cmd.SetArgs(names)
assert.NoError(t, cmd.Execute())
assert.Equal(t, names, strings.Split(strings.TrimSpace(cli.OutBuffer().String()), "\n"))
assert.Equal(t, names, removedConfigs)
}
func TestConfigRemoveContinueAfterError(t *testing.T) {
names := []string{"foo", "bar"}
var removedConfigs []string
cli := test.NewFakeCli(&fakeClient{
configRemoveFunc: func(name string) error {
removedConfigs = append(removedConfigs, name)
if name == "foo" {
return errors.Errorf("error removing config: %s", name)
}
return nil
},
})
cmd := newConfigRemoveCommand(cli)
cmd.SetArgs(names)
cmd.SetOutput(ioutil.Discard)
assert.EqualError(t, cmd.Execute(), "error removing config: foo")
assert.Equal(t, names, removedConfigs)
} | },
expectedError: "error removing config",
}, |
index.ts | import Tsue from '../instance'
import Dep from './dep'
import { def, isObject } from '../utils'
import arrayMethods from './array'
/**
* Creates an observer for the given value: the observer makes value reactive and returns the Observer instance
* @param value
*/
export function observe(value: any): Observer | void{
if(!isObject(value)) { // Only object types need to become reactive
return;
}
let ob: Observer
if(value.__ob__){ // If value already has __ob__ (an Observer instance), it is already reactive
ob = value.__ob__
}
ob = new Observer(value)
return ob;
}
export interface ObserveValue {
__ob__ ?: Observer
}
/**
* Walks value, calling defineReactive on each property to make value reactive
*/
export class Observer {
value: any;
dep: Dep;
constructor(value: ObserveValue) {
this.dep = new Dep() // dependency collector for subscribers (watchers)
def(value, '__ob__', this); // Attach __ob__ pointing to this Observer instance, marking value as reactive
if(Array.isArray(value)) { // Check whether value is an array
Object.setPrototypeOf(value, arrayMethods); // value.__proto__ = arrayMethods
this.observeArray(value)
} else {
this.walk(value)
}
}
// Iterate over obj's properties, making each one reactive
walk(obj: any) {
Object.keys(obj).forEach((key) => {
defineReactive(obj, key)
})
}
// Observe every element of the array. Note: this does not make the array itself reactive
observeArray(arr: Array<any>) {
for (let i = 0; i < arr.length; i++) {
observe(arr[i])
}
}
}
/**
* Defines a reactive property by installing a getter and setter on the object
* @param obj
* @param key
* @param val
*/
export function defineReactive(obj: Record<any, any>, key: string, val?: any) {
const dep = new Dep()
const property = Object.getOwnPropertyDescriptor(obj, key)
if (property && property.configurable === false) { // Return immediately if the property cannot be redefined
return
}
const getter = property && property.get
const setter = property && property.set
if ((!getter || setter) && arguments.length === 2) {
val = obj[key]
}
let childOb = observe(val) // Recursively observe the child value so nested objects become reactive; childOb is an Observer instance
Object.defineProperty(obj, key, {
enumerable: true,
configurable: true,
get: function() {
const value = getter ? getter.call(obj) : val;
if (Dep.target) {
dep.depend()
if (childOb) {
childOb.dep.depend()
}
}
return value
},
set: function(newVal) {
const value = getter ? getter.call(obj) : val
if (newVal === value || (newVal !== newVal && value !== value)) {
return
}
val = newVal // Update val to the new value (captured by the closure)
childOb = observe(newVal) // If newVal is an object, make it reactive as well
dep.notify() // Data changed; notify the watchers
}
})
}
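// Illustrative sketch of the resulting dependency-tracking flow (watcher
// wiring is assumed to exist elsewhere; only code from this module is used):
//
//   const state = { count: 0 };
//   observe(state);   // installs reactive getters/setters on state
//   state.count;      // getter: if Dep.target is set, dep.depend() records it
//   state.count = 1;  // setter: dep.notify() informs the collected watchers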
/**
* Proxies a property: reads and writes of vm[key] are forwarded to vm[source][key]
* @param vm
* @param source
* @param key
*/
export function proxy(vm:Tsue, source: string, key: string) {
const sharedPropertyDefinition = Object.create(null)
sharedPropertyDefinition.enumerable = true;
sharedPropertyDefinition.configurable = true;
sharedPropertyDefinition.get = function proxyGetter () {
return vm[source][key]
}
sharedPropertyDefinition.set = function proxySetter (val: any) {
vm[source][key] = val
} | } | Object.defineProperty(vm, key, sharedPropertyDefinition) |
LinRNNModel.py | # -*- coding: utf-8 -*-
import numpy as np
import torch
from torch import autograd
from torch.autograd import Variable
import torch.nn as nn
from maptrainer.model.MAPModel import MAPModel
from ..data import INIT_RANGE_BOUND
class LinRNNModel(MAPModel):
"""
`LinRNNModel`: Linear-output RNN model
Container module standing for an RNN with a linear output layer
"""
def __init__(self,
n_input,
_n_hid,
_nlayers,
variant="LSTM",
dropout=0.0,
**kwargs):
super(LinRNNModel, self).__init__()
self.variant = variant
self.nhid = _n_hid
self.nlayers = _nlayers
self.drop = nn.Dropout(dropout)
        # The linear output layer: a projection from the hidden state space to
        # the vertices' space, i.e. it has as many units as there are vertices
self.linearise = nn.Linear(_n_hid, n_input)
if variant in ['LSTM', 'GRU']:
self.rnn = getattr(nn, variant)(n_input, _n_hid, _nlayers,
dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[
variant]
except KeyError:
                raise ValueError("""An invalid option for `--model` was
                supplied; options are 'LSTM', 'GRU', 'RNN_TANH' or
                'RNN_RELU'""")
self.rnn = nn.RNN(n_input, _n_hid, _nlayers,
nonlinearity=nonlinearity, dropout=dropout)
self.init_parameters()
def forward(self, _input, hidden):
"""
:param _input:
Shape: N x T x n_in
:type _input: FloatTensor or Variable
:param hidden: (h_t, c_t)
:type hidden:
:return:
:rtype:
"""
_input = _input.permute(1, 0, 2) # The dimension representing the
# index of elements in a sequence (or the tth element of the
# sequence) is put into the 1st dim (axis 0) and the one
# representing indices of sequence (the nth sequence) into the 2nd
# dim (axis 1). Henceforth, `_input` will have a shape of `T x N x
# n_ins`.
dropped_out_input = self.drop(_input)
self.rnn.flatten_parameters()
output, hidden = self.rnn(dropped_out_input, hidden) | linearised = self.linearise(dropped_out_output)
return linearised, hidden
def init_hidden(self, bsz):
weight = next(self.parameters()).data
if self.variant == 'LSTM':
return (
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()),
Variable(weight.new(self.nlayers, bsz, self.nhid).zero_()))
# returns (h_t, c_t)
else:
return Variable(weight.new(self.nlayers, bsz, self.nhid).zero_())
def predict(self, _input, bsz):
if isinstance(_input, np.ndarray):
_input = autograd.Variable(torch.from_numpy(_input).float())
if isinstance(_input, autograd.Variable):
if len(_input.size()) == 2:
_input = _input.view(len(_input), 1, -1)
sizes = _input.size()
if sizes[1] == 1:
_input = _input.expand(sizes[0], bsz, sizes[2])
else:
raise TypeError(
"_input must be a np.ndarray or an autograd.Variable")
hidden = self.init_hidden(bsz)
outputs, hidden = self(_input, hidden)
return outputs[:, 0, :], hidden[:, 0, :] | dropped_out_output = self.drop(output) |
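# Usage sketch (illustrative; argument names follow the constructor above):
#   model = LinRNNModel(n_input=10, _n_hid=32, _nlayers=2, variant="LSTM")
#   hidden = model.init_hidden(bsz=4)
#   out, hidden = model(torch.randn(4, 7, 10), hidden)  # N x T x n_in
# Note: predict's final `hidden[:, 0, :]` assumes a plain-RNN/GRU hidden state;
# for the LSTM variant, hidden is an (h, c) tuple and would need unpacking.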
consensus_test.go | // Copyright 2018 The MATRIX Authors as well as Copyright 2014-2017 The go-ethereum Authors
// This file is part of the MATRIX library and of the go-ethereum library.
//
// The MATRIX-ethereum library is free software: you can redistribute it and/or modify it under the terms of the MIT License.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
// OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package manash
import (
"encoding/json"
"math/big"
"os"
"path/filepath"
"testing"
"github.com/matrix/go-matrix/common/math"
"github.com/matrix/go-matrix/core/types"
"github.com/matrix/go-matrix/params"
)
type diffTest struct {
ParentTimestamp uint64
ParentDifficulty *big.Int
CurrentTimestamp uint64
CurrentBlocknumber *big.Int
CurrentDifficulty *big.Int
}
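// UnmarshalJSON parses the fixture's string-encoded numbers (difficulty.json
// stores values as decimal or 0x-prefixed strings) into native types.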
func (d *diffTest) UnmarshalJSON(b []byte) (err error) {
var ext struct {
ParentTimestamp string
ParentDifficulty string
CurrentTimestamp string
CurrentBlocknumber string
CurrentDifficulty string
}
if err := json.Unmarshal(b, &ext); err != nil |
d.ParentTimestamp = math.MustParseUint64(ext.ParentTimestamp)
d.ParentDifficulty = math.MustParseBig256(ext.ParentDifficulty)
d.CurrentTimestamp = math.MustParseUint64(ext.CurrentTimestamp)
d.CurrentBlocknumber = math.MustParseBig256(ext.CurrentBlocknumber)
d.CurrentDifficulty = math.MustParseBig256(ext.CurrentDifficulty)
return nil
}
func TestCalcDifficulty(t *testing.T) {
file, err := os.Open(filepath.Join("..", "..", "tests", "testdata", "BasicTests", "difficulty.json"))
if err != nil {
t.Skip(err)
}
defer file.Close()
tests := make(map[string]diffTest)
err = json.NewDecoder(file).Decode(&tests)
if err != nil {
t.Fatal(err)
}
config := ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1150000)}
for name, test := range tests {
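		// CalcDifficulty receives the parent header, hence number = CurrentBlocknumber - 1.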
number := new(big.Int).Sub(test.CurrentBlocknumber, big.NewInt(1))
diff := CalcDifficulty(config, test.CurrentTimestamp, &types.Header{
Number: number,
Time: new(big.Int).SetUint64(test.ParentTimestamp),
Difficulty: test.ParentDifficulty,
})
if diff.Cmp(test.CurrentDifficulty) != 0 {
t.Error(name, "failed. Expected", test.CurrentDifficulty, "and calculated", diff)
}
}
}
| {
return err
} |
punctuators.rs | use serde::{Deserialize, Serialize};
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum Punctuator {
LBrace,
LParen,
RParen,
LBracket,
RBracket,
Dot,
Ellipsis,
Semicolon,
Comma,
LAngle,
RAngle,
LEqual,
GEqual,
Equal,
NEqual,
StrictEqual,
StrictNEqual,
Plus,
Minus,
Star,
Mod,
Pow,
Inc,
Dec,
LShift,
RShift,
ZRShift,
BitAnd,
BitOr,
BitXor,
Bang,
Tilde,
LogicalAnd,
LogicalOr,
Question,
Colon,
Assign,
PlusAssign,
MinusAssign,
StarAssign,
ModAssign,
PowAssign,
LShiftAssign,
RShiftAssign,
ZRShiftAssign,
BitAndAssign,
BitOrAssign,
BitXorAssign,
FatArrow,
Slash,
SlashAssign,
RBrace,
Instanceof,
// ES2020
QMarkDot,
DoubleQMark,
}
impl Punctuator {
// *= /= %= += -= <<= >>= >>>= &= ^= |= **=
pub fn is_assign_op(&self) -> bool {
match self {
Punctuator::PlusAssign
| Punctuator::MinusAssign
| Punctuator::StarAssign
| Punctuator::ModAssign
| Punctuator::PowAssign
| Punctuator::LShiftAssign
| Punctuator::RShiftAssign
| Punctuator::ZRShiftAssign
| Punctuator::BitAndAssign
| Punctuator::BitOrAssign
| Punctuator::BitXorAssign
| Punctuator::SlashAssign => true,
            Punctuator::Assign => true, // plain `=` also counts as an assignment
_ => false,
}
}
pub fn | (&self) -> bool {
match self {
Punctuator::Equal
| Punctuator::StrictEqual
| Punctuator::NEqual
| Punctuator::StrictNEqual => true,
_ => false,
}
}
pub fn to_str(&self) -> &'static str {
match self {
Punctuator::LBrace => "{",
Punctuator::LParen => "(",
Punctuator::RParen => ")",
Punctuator::LBracket => "[",
Punctuator::RBracket => "]",
Punctuator::Dot => ".",
Punctuator::Ellipsis => "...",
Punctuator::Semicolon => ";",
Punctuator::Comma => ",",
Punctuator::LAngle => "<",
Punctuator::RAngle => ">",
Punctuator::LEqual => "<=",
Punctuator::GEqual => ">=",
Punctuator::Equal => "==",
Punctuator::NEqual => "!=",
Punctuator::StrictEqual => "===",
Punctuator::StrictNEqual => "!==",
Punctuator::Plus => "+",
Punctuator::Minus => "-",
Punctuator::Star => "*",
Punctuator::Mod => "%",
Punctuator::Pow => "**",
Punctuator::Inc => "++",
Punctuator::Dec => "--",
Punctuator::LShift => "<<",
Punctuator::RShift => ">>",
Punctuator::ZRShift => ">>>",
Punctuator::BitAnd => "&",
Punctuator::BitOr => "|",
Punctuator::BitXor => "^",
Punctuator::Bang => "!",
Punctuator::Tilde => "~",
Punctuator::LogicalAnd => "&&",
Punctuator::LogicalOr => "||",
Punctuator::Question => "?",
Punctuator::Colon => ":",
Punctuator::Assign => "=",
Punctuator::PlusAssign => "+=",
Punctuator::MinusAssign => "-=",
Punctuator::StarAssign => "*=",
Punctuator::ModAssign => "%=",
Punctuator::PowAssign => "**=",
Punctuator::LShiftAssign => "<<=",
Punctuator::RShiftAssign => ">>=",
Punctuator::ZRShiftAssign => ">>>=",
Punctuator::BitAndAssign => "&=",
Punctuator::BitOrAssign => "|=",
Punctuator::BitXorAssign => "^=",
Punctuator::FatArrow => "=>",
Punctuator::Slash => "/",
Punctuator::SlashAssign => "/=",
Punctuator::RBrace => "}",
Punctuator::Instanceof => "instanceof",
// ES2020
Punctuator::QMarkDot => "?.",
Punctuator::DoubleQMark => "??",
}
}
    pub fn to_string(&self) -> String {
        // Delegates to to_str so the token table is defined in one place.
        self.to_str().to_string()
    }
}
impl From<&str> for Punctuator {
fn from(s: &str) -> Self {
match s {
"{" => Punctuator::LBrace,
"(" => Punctuator::LParen,
")" => Punctuator::RParen,
"[" => Punctuator::LBracket,
"]" => Punctuator::RBracket,
"." => Punctuator::Dot,
"..." => Punctuator::Ellipsis,
";" => Punctuator::Semicolon,
"," => Punctuator::Comma,
"<" => Punctuator::LAngle,
">" => Punctuator::RAngle,
"<=" => Punctuator::LEqual,
">=" => Punctuator::GEqual,
"==" => Punctuator::Equal,
"!=" => Punctuator::NEqual,
"===" => Punctuator::StrictEqual,
"!==" => Punctuator::StrictNEqual,
"+" => Punctuator::Plus,
"-" => Punctuator::Minus,
"*" => Punctuator::Star,
"%" => Punctuator::Mod,
"**" => Punctuator::Pow,
"++" => Punctuator::Inc,
"--" => Punctuator::Dec,
"<<" => Punctuator::LShift,
">>" => Punctuator::RShift,
">>>" => Punctuator::ZRShift,
"&" => Punctuator::BitAnd,
"|" => Punctuator::BitOr,
"^" => Punctuator::BitXor,
"!" => Punctuator::Bang,
"~" => Punctuator::Tilde,
"&&" => Punctuator::LogicalAnd,
"||" => Punctuator::LogicalOr,
"?" => Punctuator::Question,
":" => Punctuator::Colon,
"=" => Punctuator::Assign,
"+=" => Punctuator::PlusAssign,
"-=" => Punctuator::MinusAssign,
"*=" => Punctuator::StarAssign,
"%=" => Punctuator::ModAssign,
"**=" => Punctuator::PowAssign,
"<<=" => Punctuator::LShiftAssign,
">>=" => Punctuator::RShiftAssign,
">>>=" => Punctuator::ZRShiftAssign,
"&=" => Punctuator::BitAndAssign,
"|=" => Punctuator::BitOrAssign,
"^=" => Punctuator::BitXorAssign,
"=>" => Punctuator::FatArrow,
"/" => Punctuator::Slash,
"/=" => Punctuator::SlashAssign,
"}" => Punctuator::RBrace,
"instanceof" => Punctuator::Instanceof,
// ES2020
"?." => Punctuator::QMarkDot,
"??" => Punctuator::DoubleQMark,
_ => panic!("Invalid punctuator."),
}
}
}
| is_equality_op |
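// Usage sketch (illustrative):
// assert_eq!(Punctuator::from("+="), Punctuator::PlusAssign);
// assert!(Punctuator::PlusAssign.is_assign_op());
// assert_eq!(Punctuator::FatArrow.to_str(), "=>");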
gslbvserver_domain_binding.go | package gslb
type Gslbvserverdomainbinding struct {
Backupip string `json:"backupip,omitempty"`
Backupipflag bool `json:"backupipflag,omitempty"` | Name string `json:"name,omitempty"`
Sitedomainttl int `json:"sitedomainttl,omitempty"`
Ttl int `json:"ttl,omitempty"`
} | Cookiedomain string `json:"cookie_domain,omitempty"`
Cookiedomainflag bool `json:"cookie_domainflag,omitempty"`
Cookietimeout int `json:"cookietimeout,omitempty"`
Domainname string `json:"domainname,omitempty"` |
min_length.rs | use crate::{
compilation::{context::CompilationContext, JSONSchema},
error::{error, no_error, CompilationError, ErrorIterator, ValidationError},
keywords::CompilationResult,
paths::InstancePath,
validator::Validate,
};
use serde_json::{Map, Value};
pub(crate) struct MinLengthValidator {
limit: u64,
}
impl MinLengthValidator {
#[inline]
pub(crate) fn compile(schema: &Value) -> CompilationResult {
if let Some(limit) = schema.as_u64() {
Ok(Box::new(MinLengthValidator { limit }))
} else {
Err(CompilationError::SchemaError)
}
}
}
| return false;
}
}
true
}
fn validate<'a>(
&self,
_: &'a JSONSchema,
instance: &'a Value,
instance_path: &InstancePath,
) -> ErrorIterator<'a> {
if let Value::String(item) = instance {
if (item.chars().count() as u64) < self.limit {
return error(ValidationError::min_length(
instance_path.into(),
instance,
self.limit,
));
}
}
no_error()
}
}
impl ToString for MinLengthValidator {
fn to_string(&self) -> String {
format!("minLength: {}", self.limit)
}
}
#[inline]
pub(crate) fn compile(
_: &Map<String, Value>,
schema: &Value,
_: &CompilationContext,
) -> Option<CompilationResult> {
Some(MinLengthValidator::compile(schema))
} | impl Validate for MinLengthValidator {
fn is_valid(&self, _: &JSONSchema, instance: &Value) -> bool {
if let Value::String(item) = instance {
if (item.chars().count() as u64) < self.limit { |
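// Note (illustrative): is_valid measures length in Unicode scalar values via
// chars().count(), so "héllo" counts as 5 characters even though it occupies
// 6 bytes in UTF-8.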
index.tsx | import * as React from 'react';
import * as d3 from 'd3';
import * as Highcharts from 'highcharts';
import { Row, Col } from 'react-bootstrap';
import HeatmapPlot, { HeatMapProps } from '../../../../display_modules/plots/HeatmapPlot';
import HighChartsPlot from '../../../plots/HighChartsPlot';
import { HeatMapDatum } from '../../../../display_modules/plots/HeatmapPlot/util/heatmap';
import { PathwaysType } from '../../../../services/api/models/queryResult';
import { SvgRefProps } from '../../../components/DisplayContainer/d3';
import PathwaysControls from './components/PathwaysControls';
export interface PathwaysProps extends SvgRefProps {
data: PathwaysType;
isSingleton?: boolean;
}
export interface PathwaysState {
activeMetric: string;
}
export default class Pathways extends React.Component<PathwaysProps, PathwaysState> {
protected color: d3.ScaleOrdinal<string, string>;
constructor(props: PathwaysProps) {
super(props);
const metadata = this.metaDataFromProps(props);
this.state = {
activeMetric: metadata.metrics[0],
};
this.handleMetricChange = this.handleMetricChange.bind(this);
}
chartOptions(): Highcharts.Options {
const {sampleNames} = this.metaDataFromProps(this.props),
metric = this.state.activeMetric,
dataSet: {[key: string]: number} = this.props.data.samples[sampleNames[0]][metric],
pathways = Object.keys(dataSet);
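    // One bar per pathway: take the selected metric's value for the first
    // (singleton) sample.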
const data = pathways.map(pathway => dataSet[pathway]);
const chartOptions: Highcharts.Options = {
chart: {
type: 'column',
},
title: {
text: null,
},
legend: {
enabled: true,
},
xAxis: {
categories: pathways,
},
yAxis: {
title: {
text: 'Relative Abundance',
},
},
exporting: {
enabled: false,
},
series: [{
name: metric.displayFormat(),
data,
}],
};
return chartOptions;
}
heatmapOptions(): HeatMapProps {
const {sampleNames, pathways} = this.metaDataFromProps(this.props),
metric = this.state.activeMetric;
const newValues: HeatMapDatum[] = [];
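    // Flatten the samples matrix into (x, y, value) triples: one datum per
    // (sample column, pathway row) pair, indexed positionally for the heatmap.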
sampleNames.forEach((columnName, column) => {
pathways.forEach((rowName, row) => {
newValues.push({
x: column,
y: row,
value: this.props.data.samples[columnName][metric][rowName],
});
});
});
const result = {
axis: {
x: sampleNames.map(name => ({name})),
y: pathways,
},
data: newValues,
buckets: 10,
maxAxisNameLength: 100,
axisNameSize: 12,
legend: {
precision: 3,
},
svgRef: this.props.svgRef,
};
return result;
}
metaDataFromProps(props: PathwaysProps) {
const sampleNames = Object.keys(props.data.samples),
metrics = Object.keys(props.data.samples[sampleNames[0]]),
pathways = Object.keys(props.data.samples[sampleNames[0]].pathway_abundances);
return {sampleNames, metrics, pathways};
}
controlProps() {
const metadata = this.metaDataFromProps(this.props);
return {
metrics: metadata.metrics,
activeMetric: this.state.activeMetric,
handleMetricChange: this.handleMetricChange,
};
}
handleMetricChange(metric: string) {
this.setState({activeMetric: metric});
}
render() {
const isSingleton = this.props.isSingleton || false;
if (isSingleton) {
const chartOptions = this.chartOptions(); | chartId="pathways"
options={chartOptions}
chartRef={() => {}} // tslint:disable-line no-empty
/>
</Col>
</Row>
);
}
const chartProps = this.heatmapOptions(),
controlProps = this.controlProps();
return (
<Row>
<Col lg={9}>
<HeatmapPlot {...chartProps} />
</Col>
<Col lg={3}>
<PathwaysControls {...controlProps} />
</Col>
</Row>
);
}
} | return (
<Row>
<Col lg={12}>
<HighChartsPlot |
database.service.ts | import { Injectable } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { TypeOrmModuleOptions, TypeOrmOptionsFactory } from '@nestjs/typeorm';
@Injectable()
export class | implements TypeOrmOptionsFactory {
constructor(private configService: ConfigService) {}
createTypeOrmOptions(): TypeOrmModuleOptions {
return {
type: this.configService.get<any>('database.type'),
host: this.configService.get<string>('database.host'),
port: this.configService.get<number>('database.port'),
username: this.configService.get<string>('database.user'),
password: this.configService.get<string>('database.password'),
database: this.configService.get<string>('database.name'),
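      // Note: automatic schema synchronization is convenient in development
      // but is generally disabled for production deployments.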
synchronize: true,
autoLoadEntities: true,
dropSchema: false,
};
}
}
| TypeOrmConfigService |
test_categorical.py | """
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical
from pandas.api.types import CategoricalDtype
from pandas.tests.extension import base
import pandas.util.testing as tm
def make_data():
while True:
values = np.random.choice(list(string.ascii_letters), size=100)
        # ensure we meet the requirements:
        # 1. first two not null (guaranteed here, since choices are ASCII letters)
        # 2. first and second are different
        if values[0] != values[1]:
break
return values
@pytest.fixture
def dtype():
return CategoricalDtype()
@pytest.fixture
def data():
"""Length-100 array for this type.
 | * data[0] and data[1] should not be equal
"""
return Categorical(make_data())
@pytest.fixture
def data_missing():
"""Length 2 array with [NA, Valid]"""
return Categorical([np.nan, "A"])
@pytest.fixture
def data_for_sorting():
return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
@pytest.fixture
def data_missing_for_sorting():
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
@pytest.fixture
def na_value():
return np.nan
@pytest.fixture
def data_for_grouping():
return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
class TestDtype(base.BaseDtypeTests):
pass
class TestInterface(base.BaseInterfaceTests):
@pytest.mark.skip(reason="Memory usage doesn't match")
def test_memory_usage(self, data):
# Is this deliberate?
super().test_memory_usage(data)
class TestConstructors(base.BaseConstructorsTests):
pass
class TestReshaping(base.BaseReshapingTests):
def test_ravel(self, data):
# GH#27199 Categorical.ravel returns self until after deprecation cycle
with tm.assert_produces_warning(FutureWarning):
data.ravel()
class TestGetitem(base.BaseGetitemTests):
skip_take = pytest.mark.skip(reason="GH-20664.")
@pytest.mark.skip(reason="Backwards compatibility")
def test_getitem_scalar(self, data):
# CategoricalDtype.type isn't "correct" since it should
# be a parent of the elements (object). But don't want
# to break things by changing.
super().test_getitem_scalar(data)
@skip_take
def test_take(self, data, na_value, na_cmp):
# TODO remove this once Categorical.take is fixed
super().test_take(data, na_value, na_cmp)
@skip_take
def test_take_negative(self, data):
super().test_take_negative(data)
@skip_take
def test_take_pandas_style_negative_raises(self, data, na_value):
super().test_take_pandas_style_negative_raises(data, na_value)
@skip_take
def test_take_non_na_fill_value(self, data_missing):
super().test_take_non_na_fill_value(data_missing)
@skip_take
def test_take_out_of_bounds_raises(self, data, allow_fill):
return super().test_take_out_of_bounds_raises(data, allow_fill)
@pytest.mark.skip(reason="GH-20747. Unobserved categories.")
def test_take_series(self, data):
super().test_take_series(data)
@skip_take
def test_reindex_non_na_fill_value(self, data_missing):
super().test_reindex_non_na_fill_value(data_missing)
@pytest.mark.skip(reason="Categorical.take buggy")
def test_take_empty(self, data, na_value, na_cmp):
super().test_take_empty(data, na_value, na_cmp)
@pytest.mark.skip(reason="test not written correctly for categorical")
def test_reindex(self, data, na_value):
super().test_reindex(data, na_value)
class TestSetitem(base.BaseSetitemTests):
pass
class TestMissing(base.BaseMissingTests):
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_pad(self, data_missing):
super().test_fillna_limit_pad(data_missing)
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
class TestReduce(base.BaseNoReduceTests):
pass
class TestMethods(base.BaseMethodsTests):
@pytest.mark.skip(reason="Unobserved categories included")
def test_value_counts(self, all_data, dropna):
return super().test_value_counts(all_data, dropna)
def test_combine_add(self, data_repeated):
# GH 20825
# When adding categoricals in combine, result is a string
orig_data1, orig_data2 = data_repeated(2)
s1 = pd.Series(orig_data1)
s2 = pd.Series(orig_data2)
result = s1.combine(s2, lambda x1, x2: x1 + x2)
expected = pd.Series(
([a + b for (a, b) in zip(list(orig_data1), list(orig_data2))])
)
self.assert_series_equal(result, expected)
val = s1.iloc[0]
result = s1.combine(val, lambda x1, x2: x1 + x2)
expected = pd.Series([a + val for a in list(orig_data1)])
self.assert_series_equal(result, expected)
@pytest.mark.skip(reason="Not Applicable")
def test_fillna_length_mismatch(self, data_missing):
super().test_fillna_length_mismatch(data_missing)
def test_searchsorted(self, data_for_sorting):
if not data_for_sorting.ordered:
raise pytest.skip(reason="searchsorted requires ordered data.")
class TestCasting(base.BaseCastingTests):
pass
class TestArithmeticOps(base.BaseArithmeticOpsTests):
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
op_name = all_arithmetic_operators
if op_name != "__rmod__":
super().test_arith_series_with_scalar(data, op_name)
else:
pytest.skip("rmod never called when string is first argument")
def test_add_series_with_extension_array(self, data):
ser = pd.Series(data)
with pytest.raises(TypeError, match="cannot perform"):
ser + data
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
return super()._check_divmod_op(s, op, other, exc=TypeError)
class TestComparisonOps(base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
op = self.get_op_from_name(op_name)
if op_name == "__eq__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x == y)
assert (result == expected).all()
elif op_name == "__ne__":
result = op(s, other)
expected = s.combine(other, lambda x, y: x != y)
assert (result == expected).all()
else:
with pytest.raises(TypeError):
op(data, other)
class TestParsing(base.BaseParsingTests):
pass | * data[0] and data[1] should both be non missing |
volume_provisioning.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"strings"
"time"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
storage "k8s.io/api/storage/v1"
storagebeta "k8s.io/api/storage/v1beta1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
// Plugin name of the external provisioner
externalPluginName = "example.com/nfs"
// Number of PVCs for multi PVC tests
multiPVCcount = 3
)
func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
}
// checkZonesFromLabelAndAffinity checks that the PV's LabelZoneFailureDomain label and the
// terms with key LabelZoneFailureDomain in the PV's node affinity contain the given zones.
// matchZones indicates whether the zones must match exactly.
func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
ginkgo.By("checking PV's zone label and node affinity terms match expected zone")
if pv == nil {
framework.Failf("nil pv passed")
}
pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain)
}
zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel)
if err != nil {
framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
}
if matchZones && !zonesFromLabel.Equal(zones) {
framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if !matchZones && !zonesFromLabel.IsSuperset(zones) {
framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if pv.Spec.NodeAffinity == nil {
framework.Failf("node affinity not found in PV spec %v", pv.Spec)
}
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
}
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
keyFound := false
for _, r := range term.MatchExpressions {
if r.Key != v1.LabelZoneFailureDomain {
continue
}
keyFound = true
zonesFromNodeAffinity := sets.NewString(r.Values...)
if matchZones && !zonesFromNodeAffinity.Equal(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
break
}
if !keyFound {
framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term)
}
}
}
// checkAWSEBS checks properties of an AWS EBS. Test framework does not
// instantiate full AWS provider, therefore we need use ec2 API directly.
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
diskName := volume.Spec.AWSElasticBlockStore.VolumeID
var client *ec2.EC2
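	// diskName has the form "aws://<zone>/<volumeID>"; take the last path token.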
tokens := strings.Split(diskName, "/")
volumeID := tokens[len(tokens)-1]
zone := framework.TestContext.CloudConfig.Zone
if len(zone) > 0 {
region := zone[:len(zone)-1]
cfg := aws.Config{Region: ®ion}
e2elog.Logf("using region %s", region)
client = ec2.New(session.New(), &cfg)
} else {
e2elog.Logf("no region configured")
client = ec2.New(session.New())
}
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
info, err := client.DescribeVolumes(request)
if err != nil {
return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
}
if len(info.Volumes) == 0 {
return fmt.Errorf("no volumes found for volume %q", volumeID)
}
if len(info.Volumes) > 1 {
return fmt.Errorf("multiple volumes found for volume %q", volumeID)
}
awsVolume := info.Volumes[0]
if awsVolume.VolumeType == nil {
return fmt.Errorf("expected volume type %q, got nil", volumeType)
}
if *awsVolume.VolumeType != volumeType {
return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
}
if encrypted && awsVolume.Encrypted == nil {
return fmt.Errorf("expected encrypted volume, got no encryption")
}
if encrypted && !*awsVolume.Encrypted {
return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
}
return nil
}
func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
cloud, err := gce.GetGCECloud()
if err != nil {
return err
}
diskName := volume.Spec.GCEPersistentDisk.PDName
disk, err := cloud.GetDiskByNameUnknownZone(diskName)
if err != nil {
return err
}
if !strings.HasSuffix(disk.Type, volumeType) {
return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
}
return nil
}
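// testZonalDelayedBinding provisions pvcCount claims through a WaitForFirstConsumer
// class (optionally restricted via allowedTopologies) and verifies that the
// resulting PVs land in the same zone as the node the pod was scheduled to.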
func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTopology bool, pvcCount int) {
storageClassTestNameFmt := "Delayed binding %s storage class test %s"
storageClassTestNameSuffix := ""
if specifyAllowedTopology {
storageClassTestNameSuffix += " with AllowedTopologies"
}
tests := []testsuites.StorageClassTest{
{
Name: fmt.Sprintf(storageClassTestNameFmt, "EBS", storageClassTestNameSuffix),
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
DelayBinding: true,
},
{
Name: fmt.Sprintf(storageClassTestNameFmt, "GCE PD", storageClassTestNameSuffix),
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
ClaimSize: "2Gi",
DelayBinding: true,
},
}
for _, test := range tests {
if !framework.ProviderIs(test.CloudProviders...) {
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
action := "creating claims with class with waitForFirstConsumer"
suffix := "delayed"
var topoZone string
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
if specifyAllowedTopology {
action += " and allowedTopologies"
suffix += "-topo"
topoZone = getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone)
}
ginkgo.By(action)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &test.Class.Name
claims = append(claims, claim)
}
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
if node == nil {
framework.Failf("unexpected nil node found")
}
zone, ok := node.Labels[v1.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain)
}
if specifyAllowedTopology && topoZone != zone {
framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone)
}
for _, pv := range pvs {
checkZoneFromLabelAndAffinity(pv, zone, true)
}
}
}
var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
f := framework.NewDefaultFramework("volume-provisioning")
// filled in BeforeEach
var c clientset.Interface
var ns string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
ginkgo.Describe("DynamicProvisioner [Slow]", func() {
ginkgo.It("should provision storage with different parameters", func() {
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []testsuites.StorageClassTest{
// GCE/GKE
{
Name: "SSD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-ssd",
"zone": getRandomClusterZone(c),
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd")
framework.ExpectNoError(err, "checkGCEPD pd-ssd")
},
},
{
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard")
framework.ExpectNoError(err, "checkGCEPD pd-standard")
},
},
// AWS
{
Name: "gp2 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "gp2",
"zone": getRandomClusterZone(c),
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false)
framework.ExpectNoError(err, "checkAWSEBS gp2")
},
},
{
Name: "io1 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "io1",
"iopsPerGB": "50",
},
ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false)
framework.ExpectNoError(err, "checkAWSEBS io1")
},
},
{
Name: "sc1 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "sc1",
},
ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false)
framework.ExpectNoError(err, "checkAWSEBS sc1")
},
},
{
Name: "st1 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "st1",
},
ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false)
framework.ExpectNoError(err, "checkAWSEBS st1")
},
},
{
Name: "encrypted EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"encrypted": "true",
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true)
framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"},
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"},
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{
"type": "",
"availability": "",
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
// vSphere generic test
{
Name: "generic vSphere volume",
CloudProviders: []string{"vsphere"},
Provisioner: "kubernetes.io/vsphere-volume",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
// Azure
{
Name: "Azure disk volume with empty sku and location",
CloudProviders: []string{"azure"},
Provisioner: "kubernetes.io/azure-disk",
Parameters: map[string]string{},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
}
var betaTest *testsuites.StorageClassTest
for i, t := range tests {
			// Beware of closure capture: use local variables instead of those from
			// the outer scope
test := t
if !framework.ProviderIs(test.CloudProviders...) {
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
// Remember the last supported test for subsequent test of beta API
betaTest = &test
ginkgo.By("Testing " + test.Name)
suffix := fmt.Sprintf("%d", i)
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
test.TestDynamicProvisioning()
}
// Run the last test with storage.k8s.io/v1beta1 on pvc
if betaTest != nil {
ginkgo.By("Testing " + betaTest.Name + " with beta volume provisioning")
class := newBetaStorageClass(*betaTest, "beta")
// we need to create the class manually, testDynamicProvisioning does not accept beta class
class, err := c.StorageV1beta1().StorageClasses().Create(class)
framework.ExpectNoError(err)
defer deleteStorageClass(c, class.Name)
betaTest.Client = c
betaTest.Class = nil
betaTest.Claim = newClaim(*betaTest, ns, "beta")
betaTest.Claim.Spec.StorageClassName = &(class.Name)
(*betaTest).TestDynamicProvisioning()
}
})
ginkgo.It("should provision storage with non-default reclaim policy Retain", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := testsuites.StorageClassTest{
Client: c,
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard")
framework.ExpectNoError(err, "checkGCEPD")
},
}
test.Class = newStorageClass(test, ns, "reclaimpolicy")
retain := v1.PersistentVolumeReclaimRetain
test.Class.ReclaimPolicy = &retain
test.Claim = newClaim(test, ns, "reclaimpolicy")
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning()
ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
ginkgo.By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
ginkgo.By(fmt.Sprintf("deleting the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
})
ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
framework.SkipUnlessProviderIs("gce", "gke")
		var suffix string = "unmanaged"
ginkgo.By("Discovering an unmanaged zone")
allZones := sets.NewString() // all zones in the project
managedZones := sets.NewString() // subset of allZones
gceCloud, err := gce.GetGCECloud()
framework.ExpectNoError(err)
// Get all k8s managed zones (same as zones with nodes in them for test)
managedZones, err = gceCloud.GetAllZonesFromCloudProvider()
framework.ExpectNoError(err)
// Get a list of all zones in the project
zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
framework.ExpectNoError(err)
for _, z := range zones.Items {
allZones.Insert(z.Name)
}
// Get the subset of zones not managed by k8s
var unmanagedZone string
var popped bool
unmanagedZones := allZones.Difference(managedZones)
// And select one of them at random.
if unmanagedZone, popped = unmanagedZones.PopAny(); !popped {
framework.Skipf("No unmanaged zones found.")
}
ginkgo.By("Creating a StorageClass for the unmanaged zone")
test := testsuites.StorageClassTest{
Name: "unmanaged_zone",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{"zone": unmanagedZone},
ClaimSize: "1Gi",
}
sc := newStorageClass(test, ns, suffix)
sc, err = c.StorageV1().StorageClasses().Create(sc)
framework.ExpectNoError(err)
defer deleteStorageClass(c, sc.Name)
ginkgo.By("Creating a claim and expecting it to timeout")
pvc := newClaim(test, ns, suffix)
pvc.Spec.StorageClassName = &sc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
}()
		// The claim should time out in phase Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)
e2elog.Logf(err.Error())
})
ginkgo.It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
// This case tests for the regressions of a bug fixed by PR #21268
// REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
// not being deleted.
// NOTE: Polls until no PVs are detected, times out at 5 minutes.
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
const raceAttempts int = 100
var residualPVs []*v1.PersistentVolume
ginkgo.By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
test := testsuites.StorageClassTest{
Name: "deletion race",
Provisioner: "", // Use a native one based on current cloud provider
ClaimSize: "1Gi",
}
class := newStorageClass(test, ns, "race")
class, err := c.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err)
defer deleteStorageClass(c, class.Name)
// To increase chance of detection, attempt multiple iterations
for i := 0; i < raceAttempts; i++ {
suffix := fmt.Sprintf("race-%d", i)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
tmpClaim, err := framework.CreatePVC(c, ns, claim)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
}
ginkgo.By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
framework.ExpectNoError(err)
// Cleanup the test resources before breaking
defer deleteProvisionedVolumesAndDisks(c, residualPVs)
// Report indicators of regression
if len(residualPVs) > 0 {
e2elog.Logf("Remaining PersistentVolumes:")
for i, pv := range residualPVs {
e2elog.Logf("\t%d) %s", i+1, pv.Name)
}
framework.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs))
}
e2elog.Logf("0 PersistentVolumes remain.")
})
ginkgo.It("deletion should be idempotent", func() {
// This test ensures that deletion of a volume is idempotent.
// It creates a PV with Retain policy, deletes underlying AWS / GCE
// volume and changes the reclaim policy to Delete.
// PV controller should delete the PV even though the underlying volume
// is already deleted.
framework.SkipUnlessProviderIs("gce", "gke", "aws")
ginkgo.By("creating PD")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err)
ginkgo.By("creating PV")
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "volume-idempotent-delete-",
},
Spec: v1.PersistentVolumeSpec{
// Use Retain to keep the PV, the test will change it to Delete
// when the time comes.
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
				// PV is bound to a non-existing PVC, so its reclaim policy is
				// executed immediately
ClaimRef: &v1.ObjectReference{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
UID: types.UID("01234567890"),
Namespace: ns,
Name: "dummy-claim-name",
},
},
}
switch framework.TestContext.Provider {
case "aws":
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
},
}
case "gce", "gke":
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
},
}
}
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get Released")
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout)
framework.ExpectNoError(err)
ginkgo.By("deleting the PD")
err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource)
framework.ExpectNoError(err)
ginkgo.By("changing the PV reclaim policy")
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
pv, err = c.CoreV1().PersistentVolumes().Update(pv)
framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout)
framework.ExpectNoError(err)
})
})
ginkgo.Describe("DynamicProvisioner External", func() {
ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
// external dynamic provisioner pods need additional permissions provided by the
// persistent-volume-provisioner clusterrole and a leader-locking role
serviceAccountName := "default"
subject := rbacv1.Subject{
Kind: rbacv1.ServiceAccountKind,
Namespace: ns,
Name: serviceAccountName,
}
err := auth.BindClusterRole(c.RbacV1(), "system:persistent-volume-provisioner", ns, subject)
framework.ExpectNoError(err)
roleName := "leader-locking-nfs-provisioner"
_, err = f.ClientSet.RbacV1().Roles(ns).Create(&rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: []rbacv1.PolicyRule{{
APIGroups: []string{""},
Resources: []string{"endpoints"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
}},
})
framework.ExpectNoError(err, "Failed to create leader-locking role")
err = auth.BindRoleInNamespace(c.RbacV1(), roleName, ns, subject)
framework.ExpectNoError(err)
err = auth.WaitForAuthorizationUpdate(c.AuthorizationV1(),
serviceaccount.MakeUsername(ns, serviceAccountName),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization")
ginkgo.By("creating an external dynamic provisioner pod")
pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
defer e2epod.DeletePodOrFail(c, ns, pod.Name)
ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{
Client: c,
Name: "external provisioner test",
Provisioner: externalPluginName,
ClaimSize: "1500Mi",
ExpectedSize: "1500Mi",
}
test.Class = newStorageClass(test, ns, "external")
test.Claim = newClaim(test, ns, "external")
test.Claim.Spec.StorageClassName = &test.Class.Name
ginkgo.By("creating a claim with a external provisioning annotation")
test.TestDynamicProvisioning()
})
})
ginkgo.Describe("DynamicProvisioner Default", func() {
ginkgo.It("should create and delete default persistent volumes [Slow]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
ginkgo.By("creating a claim with no annotation")
test := testsuites.StorageClassTest{
Client: c,
Name: "default",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
}
test.Claim = newClaim(test, ns, "default")
test.TestDynamicProvisioning()
})
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName, scErr := framework.GetDefaultStorageClassName(c)
if scErr != nil {
framework.Failf(scErr.Error())
}
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
ginkgo.By("setting the is-default StorageClass annotation to false")
verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true")
updateDefaultStorageClass(c, scName, "false")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
			// The claim should time out in phase Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)
e2elog.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
})
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName, scErr := framework.GetDefaultStorageClassName(c)
if scErr != nil {
framework.Failf(scErr.Error())
}
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
ginkgo.By("removing the is-default StorageClass annotation")
verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true")
updateDefaultStorageClass(c, scName, "")
ginkgo.By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
framework.ExpectNoError(err)
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
			// The claim should time out in phase Pending
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
framework.ExpectError(err)
e2elog.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
})
})
framework.KubeDescribe("GlusterDynamicProvisioner", func() {
ginkgo.It("should create and delete persistent volumes [fast]", func() {
framework.SkipIfProviderIs("gke")
ginkgo.By("creating a Gluster DP server Pod")
pod := startGlusterDpServerPod(c, ns)
serverURL := "http://" + pod.Status.PodIP + ":8081"
ginkgo.By("creating a StorageClass")
test := testsuites.StorageClassTest{
Client: c,
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverURL},
}
suffix := fmt.Sprintf("glusterdptest")
test.Class = newStorageClass(test, ns, suffix)
ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
test.TestDynamicProvisioning()
})
})
ginkgo.Describe("Invalid AWS KMS key", func() {
ginkgo.It("should report an error and create no PV", func() {
framework.SkipUnlessProviderIs("aws")
test := testsuites.StorageClassTest{
Name: "AWS EBS with invalid KMS key",
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
}
ginkgo.By("creating a StorageClass")
suffix := fmt.Sprintf("invalid-aws")
class := newStorageClass(test, ns, suffix)
class, err := c.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err)
defer func() {
e2elog.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
}()
ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
framework.ExpectNoError(err)
defer func() {
e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
}()
// Watch events until the message about invalid key appears.
// Event delivery is not reliable and it's used only as a quick way how to check if volume with wrong KMS
// key was not provisioned. If the event is not delivered, we check that the volume is not Bound for whole
// ClaimProvisionTimeout in the very same loop.
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{})
framework.ExpectNoError(err)
for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
return true, nil
}
}
pvc, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
if err != nil {
return true, err
}
if pvc.Status.Phase != v1.ClaimPending {
// The PVC was bound to something, i.e. PV was created for wrong KMS key. That's bad!
return true, fmt.Errorf("PVC got unexpectedly %s (to PV %q)", pvc.Status.Phase, pvc.Spec.VolumeName)
}
return false, nil
})
if err == wait.ErrWaitTimeout {
e2elog.Logf("The test missed event about failed provisioning, but checked that no volume was provisioned for %v", framework.ClaimProvisionTimeout)
err = nil
}
framework.ExpectNoError(err)
})
})
ginkgo.Describe("DynamicProvisioner delayed binding [Slow]", func() {
ginkgo.It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/)
})
})
ginkgo.Describe("DynamicProvisioner allowedTopologies", func() {
ginkgo.It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() {
tests := []testsuites.StorageClassTest{
{
Name: "AllowedTopologies EBS storage class test",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
},
{
Name: "AllowedTopologies GCE PD storage class test",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
},
}
for _, test := range tests {
if !framework.ProviderIs(test.CloudProviders...) {
e2elog.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
ginkgo.By("creating a claim with class with allowedTopologies set")
suffix := "topology"
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
zone := getRandomClusterZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
pv := test.TestDynamicProvisioning()
checkZoneFromLabelAndAffinity(pv, zone, true)
}
})
})
ginkgo.Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() {
ginkgo.It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/)
})
})
})
func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
framework.ExpectNoError(err)
gomega.Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(gomega.Equal(expectedDefault))
}
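// updateDefaultStorageClass sets or clears both the beta and GA default-class
// annotations on the named StorageClass and verifies the result. A usage sketch
// (illustrative): temporarily demote the default class during a test:
//
//	updateDefaultStorageClass(c, scName, "false")
//	defer updateDefaultStorageClass(c, scName, "true")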
func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
framework.ExpectNoError(err)
if defaultStr == "" {
delete(sc.Annotations, storageutil.BetaIsDefaultStorageClassAnnotation)
delete(sc.Annotations, storageutil.IsDefaultStorageClassAnnotation)
} else {
if sc.Annotations == nil {
sc.Annotations = make(map[string]string)
}
sc.Annotations[storageutil.BetaIsDefaultStorageClassAnnotation] = defaultStr
sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr
}
sc, err = c.StorageV1().StorageClasses().Update(sc)
framework.ExpectNoError(err)
expectedDefault := false
if defaultStr == "true" {
expectedDefault = true
}
verifyDefaultStorageClass(c, scName, expectedDefault)
}
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
}
return &claim
}
func newClaim(t testsuites.StorageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
claim := getClaim(t.ClaimSize, ns)
if t.VolumeMode == v1.PersistentVolumeBlock {
blockVolumeMode := v1.PersistentVolumeBlock
claim.Spec.VolumeMode = &blockVolumeMode
}
return claim
}
func getDefaultPluginName() string {
switch {
case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
return "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
return "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
return "kubernetes.io/cinder"
case framework.ProviderIs("vsphere"):
return "kubernetes.io/vsphere-volume"
case framework.ProviderIs("azure"):
return "kubernetes.io/azure-disk"
}
return ""
}
func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storage.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: v1.LabelZoneFailureDomain,
Values: []string{zone},
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storage.StorageClass {
pluginName := t.Provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
}
if suffix == "" {
suffix = "sc"
}
bindingMode := storage.VolumeBindingImmediate
if t.DelayBinding {
bindingMode = storage.VolumeBindingWaitForFirstConsumer
}
sc := getStorageClass(pluginName, t.Parameters, &bindingMode, ns, suffix)
if t.AllowVolumeExpansion {
sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
}
return sc
}
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storage.VolumeBindingMode,
ns string,
suffix string,
) *storage.StorageClass {
if bindingMode == nil {
defaultBindingMode := storage.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
// TODO: remove when storage.k8s.io/v1beta1 is removed.
func newBetaStorageClass(t testsuites.StorageClassTest, suffix string) *storagebeta.StorageClass {
pluginName := t.Provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
}
if suffix == "" {
suffix = "default"
}
return &storagebeta.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: suffix + "-",
},
Provisioner: pluginName,
Parameters: t.Parameters,
}
}
func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{ | ObjectMeta: metav1.ObjectMeta{
GenerateName: "glusterdynamic-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "glusterdynamic-provisioner",
Image: "docker.io/humblec/glusterdynamic-provisioner:v1.0",
Args: []string{
"-config=" + "/etc/heketi/heketi.json",
},
Ports: []v1.ContainerPort{
{Name: "heketi", ContainerPort: 8081},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(c, provisionerPod))
ginkgo.By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
// waitForProvisionedVolumesDeleted is a polling wrapper that scans all PersistentVolumes
// for any associated with the test's StorageClass. It returns the PVs remaining after the
// poll window (empty on success) together with any polling error.
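// Example (sketch):
//
//	residualPVs, err := waitForProvisionedVolumesDeleted(c, sc.Name)
//	framework.ExpectNoError(err, "PVs were not deleted within the poll window")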
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {
var remainingPVs []*v1.PersistentVolume
err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
remainingPVs = []*v1.PersistentVolume{}
allPVs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
return true, err
}
for i := range allPVs.Items {
// take the address of the slice element, not of the loop variable,
// so each appended pointer refers to a distinct PV
pv := &allPVs.Items[i]
if pv.Spec.StorageClassName == scName {
remainingPVs = append(remainingPVs, pv)
}
}
if len(remainingPVs) > 0 {
return false, nil // Poll until no PVs remain
}
return true, nil // No PVs remain
})
return remainingPVs, err
}
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(c clientset.Interface, className string) {
err := c.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.ExpectNoError(err)
}
}
// deleteProvisionedVolumesAndDisks [gce||gke only] iteratively deletes persistent volumes and their attached GCE PDs.
func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.PersistentVolume) {
for _, pv := range pvs {
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name))
}
}
func getRandomClusterZone(c clientset.Interface) string {
zones, err := framework.GetClusterZones(c)
framework.ExpectNoError(err)
gomega.Expect(len(zones)).ToNot(gomega.Equal(0))
zonesList := zones.UnsortedList()
return zonesList[rand.Intn(zones.Len())]
} | TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
}, |
test.py | # -*- coding: UTF-8 -*-
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../../../../pyutilities')
from mqservice import MsgQueueService
class Unittest(object):
def __init__(self):
|
self.mq_serv = MsgQueueService('test_queue')
self.mq_serv.set_callback(self._recv)
self.mq_serv.start(daemon=True)
def _recv(self, json_in):
print('Unittest _recv')
print(json_in)
def test_write_coil(self, uri, address, value):
obj = {
'method': 'write_coil',
'data': [uri, address, value]
}
print(self.mq_serv.request(self.queue, obj))
def test_write_register(self, uri, address, value):
obj = {
'method': 'write_register',
'data': [uri, address, value]
}
print(self.mq_serv.request(self.queue, obj))
def test_read_coils(self, uri, address, count):
obj = {
'method': 'read_coils',
'data': [uri, address, count]
}
print(self.mq_serv.request(self.queue, obj))
def test_read_discrete_inputs(self, uri, address, count):
obj = {
'method': 'read_discrete_inputs',
'data': [uri, address, count]
}
print(self.mq_serv.request(self.queue, obj))
def test_read_holding_registers(self, uri, address, count):
obj = {
'method': 'read_holding_registers',
'data': [uri, address, count]
}
print(self.mq_serv.request(self.queue, obj))
def test_read_input_registers(self, uri, address, count):
obj = {
'method': 'read_input_registers',
'data': [uri, address, count]
}
print(self.mq_serv.request(self.queue, obj))
if __name__ == "__main__":
t = Unittest()
uri = '127.0.0.1:5020'
t.test_write_coil(uri, '5', '0')
t.test_write_register(uri, '15', 9)
t.test_read_coils(uri, '5', 10)
t.test_read_discrete_inputs(uri, "10", "10")
t.test_read_holding_registers(uri, "100", "112")
t.test_read_input_registers(uri, "100", "10")
print('test finish') | self.queue = 'ModbusPlugin-TCP' |
parse-script.ts | import { ParserPlugin } from '@babel/parser'
import * as bt from '@babel/types'
import { NodePath } from 'ast-types'
import recast from 'recast'
import Map from 'ts-map'
import buildParser from './babel-parser'
import Documentation from './Documentation'
import { ParseOptions } from './parse' | import resolveExportedComponent from './utils/resolveExportedComponent'
import documentRequiredComponents from './utils/documentRequiredComponents'
const ERROR_MISSING_DEFINITION = 'No suitable component definition found'
export type Handler = (
doc: Documentation,
componentDefinition: NodePath,
ast: bt.File,
opt: ParseOptions
) => Promise<void>
export default async function parseScript(
source: string,
preHandlers: Handler[],
handlers: Handler[],
options: ParseOptions,
documentation?: Documentation,
forceSingleExport: boolean = false
): Promise<Documentation[] | undefined> {
const plugins: ParserPlugin[] = options.lang === 'ts' ? ['typescript'] : ['flow']
if (options.jsx) {
plugins.push('jsx')
}
const ast = cacher(() => recast.parse(source, { parser: buildParser({ plugins }) }), source)
if (!ast) {
throw new Error(`Unable to parse empty file "${options.filePath}"`)
}
const [componentDefinitions, ievSet] = resolveExportedComponent(ast)
if (componentDefinitions.size === 0) {
// if there are any immediately exported variables,
// resolve their documentation
const docs = await documentRequiredComponents(documentation, ievSet, undefined, options)
// if we do not find any components, throw
if (!docs.length) {
throw new Error(`${ERROR_MISSING_DEFINITION} on "${options.filePath}"`)
} else {
return docs
}
}
return executeHandlers(
preHandlers,
handlers,
componentDefinitions,
documentation,
ast,
options,
forceSingleExport
)
}
function executeHandlers(
preHandlers: Handler[],
localHandlers: Handler[],
componentDefinitions: Map<string, NodePath>,
documentation: Documentation | undefined,
ast: bt.File,
opt: ParseOptions,
forceSingleExport: boolean
): Promise<Documentation[] | undefined> {
const compDefs = componentDefinitions
.keys()
.filter(name => name && (!opt.nameFilter || opt.nameFilter.indexOf(name) > -1))
// sort the default export first so that, when a file has multiple exports, parse returns it
.sort((_, name2) => (name2 === 'default' ? 1 : 0))
if (forceSingleExport && compDefs.length > 1) {
throw 'vue-docgen-api: multiple exports in a component file are not handled by docgen.parse; please use "docgen.parseMulti" instead'
}
return Promise.all(
compDefs.map(async name => {
// If there are multiple exports and an initial documentation,
// it means the doc is coming from an SFC template.
// Only enrich the doc attached to the default export
// NOTE: module.exports is normalized to default
const doc =
(compDefs.length > 1 && name !== 'default' ? undefined : documentation) ||
new Documentation(opt.filePath)
const compDef = componentDefinitions.get(name) as NodePath
// execute all prehandlers in order
await preHandlers.reduce(async (_, handler) => {
await _
return await handler(doc, compDef, ast, opt)
}, Promise.resolve())
await Promise.all(localHandlers.map(async handler => await handler(doc, compDef, ast, opt)))
// set exportName last:
// doing this at the end of the function keeps dependency names
// from bleeding onto the main components
doc.set('exportName', name)
return doc
})
)
} | import cacher from './utils/cacher' |
n0069_sqrtx.rs | /**
* [69] Sqrt(x)
*
* Implement int sqrt(int x).
*
* Compute and return the square root of x, where x is guaranteed to be a non-negative integer.
*
* Since the return type is an integer, the decimal digits are truncated and only the integer part of the result is returned.
*
* Example 1:
*
*
* Input: 4
* Output: 2
*
*
* Example 2:
*
*
* Input: 8
* Output: 2
* Explanation: The square root of 8 is 2.82842..., and since
* the decimal part is truncated, 2 is returned.
*
*
*/
pub struct Solution {} |
// Newton-Raphson for: root^2 - n = 0
// Tangent equation: y = 2 * root * x - (root^2 + n)
// Zero point: (root^2 + n) / (2 * root)
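// Note that the implementation below narrows the answer with a binary search
// rather than iterating Newton's formula. A minimal Newton-Raphson sketch
// (hypothetical helper, not used by the tests below):
//
// fn newton_sqrt(x: i32) -> i32 {
//     if x < 2 {
//         return x;
//     }
//     let n = x as i64;
//     let mut root = n; // any start >= sqrt(n) converges from above
//     while root * root > n {
//         root = (root + n / root) / 2; // x-intercept of the tangent line
//     }
//     root as i32
// }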
impl Solution {
pub fn my_sqrt(x: i32) -> i32 {
let mut size = x;
let mut base = 1;
while size > 1 {
let half = size / 2;
let mid = base + half;
if mid <= x / mid {
base = mid;
}
size -= half;
}
base
}
}
// submission codes end
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_69() {
assert_eq!(Solution::my_sqrt(8), 2);
assert_eq!(Solution::my_sqrt(16), 4);
assert_eq!(Solution::my_sqrt(17), 4);
assert_eq!(Solution::my_sqrt(81), 9);
assert_eq!(Solution::my_sqrt(82), 9);
assert_eq!(Solution::my_sqrt(100480577), 10024);
assert_eq!(Solution::my_sqrt(100480575), 10023);
assert_eq!(Solution::my_sqrt(100480575), 10023);
assert_eq!(Solution::my_sqrt(80), 8);
assert_eq!(Solution::my_sqrt(2), 1);
}
} |
// submission codes start here |
functions.go | package main
type FunctionType uint32
const (
FunctionTypeMedian = FunctionType(iota)
FunctionTypeAverage = FunctionType(iota)
FunctionTypeSum = FunctionType(iota)
FunctionTypeCPU = FunctionType(iota) | FunctionTypeMemory = FunctionType(iota)
)
type CallableFunction func([]float32) (float32, error)
type CallableFunctionMap map[FunctionType]CallableFunction
var FunctionRegistry = make(CallableFunctionMap) | |
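// A minimal sketch of registering a callable in the registry above
// (the init block is illustrative, not part of the original file):
//
// func init() {
//     FunctionRegistry[FunctionTypeSum] = func(vs []float32) (float32, error) {
//         var s float32
//         for _, v := range vs {
//             s += v
//         }
//         return s, nil
//     }
// }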
errors.go | package types
// DONTCOVER
import (
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
// x/hello module sentinel errors | ErrSample = sdkerrors.Register(ModuleName, 1100, "sample error")
) | var ( |
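// Usage sketch at a call site (illustrative):
//
// return sdkerrors.Wrap(ErrSample, "describe what failed")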
sixMachineNoise4.ltd.py | # LTD simulation models / perturbances
# Attribute name case sensitive.
# Commented and empty lines are ignored
# Double quoted variable names in sysPert parameters ignored
# Uses Steps and no ACE filtering
# Perturbances
mirror.sysPerturbances = [
#'load 9 : step P 5 75 rel',
#'gen 5 : step Pm 5 -75 rel',
#'gen 5 : step Pref 5 -75 rel',
]
# Power Plants
mirror.sysPowerPlants ={'pp1': ["gen 2 1: 0.75 : step", "gen 2 2 : 0.25: step"],
'pp2': ["gen 3 : 0.75: step", "gen 4 : 0.25: step"],
}
mirror.NoiseAgent = ltd.perturbance.LoadNoiseAgent(mirror, 0.3, True)
# Testing of Balancing Authority input
mirror.sysBA = {
'BA1':{
'Area':1,
'B': "2.0 : perload", # MW/0.1 Hz
'AGCActionTime': 15.00, # seconds
'ACEgain' : 2.0,
'AGCType':'TLB : 0', # Tie-Line Bias
'UseAreaDroop' : False,
'AreaDroop' : 0.05,
'IncludeIACE' : True,
'IACEconditional': False,
'IACEwindow' : 15, # seconds - size of window
'IACEscale' : 1/15,
'IACEweight' : .3, # fraction out of one to mix with the calculated ACE
'IACEdeadband' : 0.0, # Hz # changed 10/6/19
'ACEFiltering': 'PI : 0.04 0.0001', # changed 10/6/19
'AGCDeadband' : None, # MW? -> not implemented
'GovDeadbandType' : 'nldroop', # changed 10/6/19
'GovDeadband' : .036, # Hz
'GovAlpha' : 0.016, # changed 10/6/19
'GovBeta' : 0.036, # changed 10/6/19
'CtrlGens': ['plant pp1 : .60 ',
'gen 1 : .40 : step'] | 'AGCActionTime': 15.00, # seconds
'ACEgain' : 2.0,
'AGCType':'TLB : 0', # Tie-Line Bias
'UseAreaDroop' : False,
'AreaDroop' : 0.05,
'IncludeIACE' : True,
'IACEconditional': False,
'IACEwindow' : 15, # seconds - size of window
'IACEscale' : 1/15,
'IACEweight' : .3, # fraction out of one to mix with the calculated ACE
'IACEdeadband' : 0.0, # Hz # changed 10/6/19
'ACEFiltering': 'PI : 0.04 0.0001', # changed 10/6/19
'AGCDeadband' : None, # MW? -> not implemented
'GovDeadbandType' : 'nldroop', # changed 10/6/19
'GovDeadband' : .036, # Hz
'GovAlpha' : 0.016, # changed 10/6/19
'GovBeta' : 0.036, # changed 10/6/19
'CtrlGens': ['plant pp2 : 1.0 ']
},
} | },
'BA2':{
'Area':2,
'B': "2.0 : perload", # MW/0.1 Hz |
usePortableTextEditorSelection.ts | import {createContext, useContext} from 'react'
import {EditorSelection} from '../../types/editor'
/**
* A React context for sharing the editor selection.
*/
export const PortableTextEditorSelectionContext = createContext<EditorSelection>(null)
/**
* Get the current editor selection from the React context.
*/
export const usePortableTextEditorSelection = () => {
const selection = useContext(PortableTextEditorSelectionContext)
if (selection === undefined) {
throw new Error(
`The \`usePortableTextEditorSelection\` hook must be used inside the <PortableTextEditor> component's context.`
)
}
return selection | } | |
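// Usage sketch, inside a component rendered under <PortableTextEditor>:
//
// const selection = usePortableTextEditorSelection()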
test_ops.py | import tensorflow as tf
import os
import math
import time
import numpy
import unittest
tf.compat.v1.disable_eager_execution()
from sklearn.neighbors import KernelDensity
from kde_histogram import KDEHistogram
from histogram_max import *
session = None
def get_session():
global session
if not session:
session = tf.compat.v1.Session()
return session
def makeModel(nelem,nbins,start,end,kernel,bandwidth):
class TestModel():
def __init__(self):
self.nelem = nelem
self.nbins = nbins
self.start = start
self.end = end
self.kernel = kernel
self.bandwidth = bandwidth
self.values = tf.keras.layers.Input(shape=(self.nelem,))
self.weights = tf.keras.layers.Input(shape=(self.nelem,))
self.factors = tf.keras.layers.Input(shape=(self.nbins,))
self.hist = KDEHistogram(
nbins=self.nbins,
start=self.start,
end=self.end,
kernel="flat",
bandwidth_hist=self.bandwidth,
bandwidth_grad=self.bandwidth,
add_overflow = False
)([self.values,self.weights])
score = tf.keras.layers.Lambda(lambda x: tf.multiply(x[0],x[1]))([self.hist,self.factors])
self.score = tf.keras.layers.Lambda(lambda x: tf.reduce_sum(x))(score)
self.model = tf.keras.Model(inputs=[self.values,self.weights],outputs=[self.hist])
self.model.compile(loss='mse', optimizer='sgd') #dummy
self.gradients = tf.gradients(self.score,[self.values,self.weights])
def getHist(self,valuesArray,weightsArray):
return self.model.predict_on_batch([valuesArray,weightsArray])
def getScore(self,valuesArray,weightsArray,factorsArray):
sess = get_session()
scoreArray = sess.run(self.score, feed_dict = {
self.values: valuesArray,
self.weights: weightsArray,
self.factors: factorsArray
})
return scoreArray
def getGrad(self,valuesArray,weightsArray,factorsArray):
sess = get_session()
gradientsList = sess.run(self.gradients, feed_dict = {
self.values: valuesArray,
self.weights: weightsArray,
self.factors: factorsArray
})
return gradientsList
return TestModel()
class KDETest(unittest.TestCase):
def testHist(self):
for nelem in [1,23]:
for nbins in [1,2,17]:
for start in [-10,0,3]:
for d in [1,11]:
#nelem = 10
#nbins = 2
#start = -10
end = start+d
kernel='flat'
bandwidth = 1e-12
testModel = makeModel(nelem,nbins,start,end,kernel,bandwidth)
for i in range(0,5):
valuesArray = numpy.zeros((1,nelem))
weightsArray = numpy.zeros((1,nelem))
factorsArray = numpy.zeros((1,nbins))
for j in range(nelem):
valuesArray[0,j] = i*j+j*0.2-i*0.3+i*i+0.01
weightsArray[0,j] = i*i-10*j+i*j*j-0.25*i-2
for j in range(nbins):
factorsArray[0,j] = i*i*j-j*0.5+i*i*0.07-3
histArray = testModel.getHist(valuesArray,weightsArray)[0]
histArrayRef = numpy.histogram(
valuesArray[0,:],
bins=nbins,
range=(start,end),
weights=weightsArray[0,:]
)
for j in range(nbins):
self.assertEqual(histArray[j],histArrayRef[0][j])
def testGrad(self):
for nelem in [1,11]:
for nbins in [1,17]:
for start in [-10,0,3]:
for d in [1,11]:
for bandwidth in [1e-12,0.1,2]:
#nelem = 10
#nbins = 2
#start = -10
end = start+d
kernel='flat'
testModel = makeModel(nelem,nbins,start,end,kernel,bandwidth)
sess = get_session()
for i in range(3):
valuesArray = numpy.zeros((1,nelem))
weightsArray = numpy.zeros((1,nelem))
factorsArray = numpy.zeros((1,nbins))
for j in range(nelem):
valuesArray[0,j] = i*j+j*0.2-i*0.3+i*i+0.01
weightsArray[0,j] = i*i-10*j+i*j*j-0.25*i-2
for j in range(nbins):
factorsArray[0,j] = i*i*j-j*0.5+i*i*0.07-3
gradientsList = testModel.getGrad(
valuesArray,
weightsArray,
factorsArray
)
for j in range(nelem):
hV = 1e-2*(end-start)/nbins
hW = math.fabs(weightsArray[0,j]*1e-2)+1e-6
diff = numpy.zeros(valuesArray.shape)
diff[0,j]=1.
scoreValueDiff = (testModel.getScore(
valuesArray+diff*hV,
weightsArray,
factorsArray
) - testModel.getScore(
valuesArray-diff*hV,
weightsArray,
factorsArray
))/(2*hV)
scoreWeightDiff = (testModel.getScore(
valuesArray,
weightsArray+diff*hW,
factorsArray
) - testModel.getScore(
valuesArray,
weightsArray-diff*hW,
factorsArray
))/(2*hW)
'''
if bandwidth>hV:
print (
j,
gradientsList[0][0,j],
scoreValueDiff,
gradientsList[0][0,j]-scoreValueDiff,
hV
)
self.assertTrue(
math.fabs(gradientsList[0][0,j]-scoreValueDiff)<(20*hV)
)
'''
self.assertTrue(
math.fabs(gradientsList[1][0,j]-scoreWeightDiff)<(2*hW)
)
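# The gradient check above uses central differences,
#   d(score)/dv_j ~ (score(v + h*e_j) - score(v - h*e_j)) / (2*h),
# with h scaled to the bin width for values (hV) and to |w_j| for weights (hW),
# and compares the result against the analytic tf.gradients output.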
class HistogramMaxSampleTest(unittest.TestCase):
def testHistSingle(self):
sess = get_session()
for n in range(2,200,10):
hists = tf.compat.v1.placeholder(tf.float32, shape=(1, n,1))
histMax = histogram_max_sample_module.histogram_max_sample(hists)
for i in range(hists.shape[1]):
val = numpy.zeros(hists.shape)
val[0,i,0] = 1
self.assertEqual(sess.run(histMax,feed_dict={
hists:val |
def testHistSample(self):
sess = get_session()
hists = tf.compat.v1.placeholder(tf.float32, shape=(100, 200,1))
histMax = histogram_max_sample_module.histogram_max_sample(hists)
val = numpy.zeros(hists.shape)
for b in range(hists.shape[0]):
for n in range(5):
i = int(numpy.random.uniform(0,int(hists.shape[1])))
val[b,i,0] = numpy.random.uniform(0.1,0.9)
val/=numpy.sum(val,axis=1,keepdims=True)
result = numpy.zeros(hists.shape)
for t in range(10000):
sampled = sess.run(histMax,feed_dict={hists:val})
for b in range(hists.shape[0]):
result[b,int(sampled[b,0]),0] += 1.
result/=numpy.sum(result,axis=1,keepdims=True)
p = 0
f = 0
for b in range(hists.shape[0]):
for i in range(hists.shape[1]):
if val[b,i,0]>0.01:
if math.fabs(val[b,i,0]-result[b,i,0])/val[b,i,0]<0.1:
p += 1
else:
f += 1
#require >90% to pass
self.assertTrue(f<0.1*p)
if __name__ == '__main__':
test_suite = unittest.TestSuite()
test_suite.addTest(KDETest('testHist'))
test_suite.addTest(KDETest('testGrad'))
test_suite.addTest(HistogramMaxSampleTest('testHistSingle'))
test_suite.addTest(HistogramMaxSampleTest('testHistSample'))
unittest.runner.TextTestRunner(verbosity=2).run(test_suite) | })[0,0],i)
|
pacemaker_test.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
chained_bft::liveness::pacemaker::{
ExponentialTimeInterval, NewRoundEvent, NewRoundReason, Pacemaker, PacemakerTimeInterval,
},
util::mock_time_service::SimulatedTimeService,
};
use consensus_types::common::Round;
use futures::{executor::block_on, StreamExt};
use std::{sync::Arc, time::Duration};
#[test]
fn test_pacemaker_time_interval() {
let interval = ExponentialTimeInterval::new(Duration::from_millis(3000), 1.5, 2);
assert_eq!(3000, interval.get_round_duration(0).as_millis());
assert_eq!(4500, interval.get_round_duration(1).as_millis());
assert_eq!(
6750, /* 4500*1.5 */
interval.get_round_duration(2).as_millis()
);
// Test that there is no integer overflow
assert_eq!(6750, interval.get_round_duration(1000).as_millis());
}
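// The interval under test follows duration = base * exponent^min(round, max_exponent):
// with base 3000ms, exponent 1.5 and max_exponent 2, every round from 2 onwards
// yields 6750ms, which is why round 1000 cannot overflow.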
#[test]
/// Verify that Pacemaker properly outputs local timeout events upon timeout
fn test_basic_timeout() {
let (mut pm, mut timeout_rx) = make_pacemaker();
// jump start the pacemaker
pm.process_certificates(Some(0), None, None);
for _ in 0..2 {
let round = block_on(timeout_rx.next()).unwrap();
// Here we just test the timeout send retry;
// the timeout round does not change because no timeout certificate has been gathered at this point
assert_eq!(1, round);
pm.process_local_timeout(round);
}
}
#[test]
fn test_round_event_generation() {
let (mut pm, _) = make_pacemaker();
// Happy path with new QC
expect_qc(2, pm.process_certificates(Some(1), None, None));
// Old QC does not generate anything
assert!(pm.process_certificates(Some(1), None, None).is_none());
// A TC for a higher round
expect_timeout(3, pm.process_certificates(None, Some(2), None));
// In case both QC and TC are present choose the one with the higher value
expect_timeout(4, pm.process_certificates(Some(2), Some(3), None));
// In case both QC and TC are present with the same value, choose QC
expect_qc(5, pm.process_certificates(Some(4), Some(4), None));
}
fn make_pacemaker() -> (Pacemaker, channel::Receiver<Round>) {
let time_interval = Box::new(ExponentialTimeInterval::fixed(Duration::from_millis(2)));
let simulated_time = SimulatedTimeService::auto_advance_until(Duration::from_millis(4));
let (timeout_tx, timeout_rx) = channel::new_test(1_024);
(
Pacemaker::new(time_interval, Arc::new(simulated_time), timeout_tx),
timeout_rx,
)
}
fn | (round: Round, event: Option<NewRoundEvent>) {
let event = event.unwrap();
assert_eq!(round, event.round);
assert_eq!(event.reason, NewRoundReason::QCReady);
}
fn expect_timeout(round: Round, event: Option<NewRoundEvent>) {
let event = event.unwrap();
assert_eq!(round, event.round);
assert_eq!(event.reason, NewRoundReason::Timeout);
}
| expect_qc |
intravisit.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! HIR walker for walking the contents of nodes.
//!
//! **For an overview of the visitor strategy, see the docs on the
//! `super::itemlikevisit::ItemLikeVisitor` trait.**
//!
//! If you have decided to use this visitor, here are some general
//! notes on how to do it:
//!
//! Each overridden visit method has full control over what
//! happens with its node: it can do its own traversal of the node's children,
//! call `intravisit::walk_*` to apply the default traversal algorithm, or prevent
//! deeper traversal by doing nothing.
//!
//! When visiting the HIR, the contents of nested items are NOT visited
//! by default. This is different from the AST visitor, which does a deep walk.
//! Hence this module is called `intravisit`; see the method `visit_nested_item`
//! for more details.
//!
//! Note: it is an important invariant that the default visitor walks
//! the body of a function in "execution order" - more concretely, if
//! we consider the reverse post-order (RPO) of the CFG implied by the HIR,
//! then a pre-order traversal of the HIR is consistent with the CFG RPO
//! on the *initial CFG point* of each HIR node, while a post-order traversal
//! of the HIR is consistent with the CFG RPO on each *final CFG point* of
//! each CFG node.
//!
//! One thing that follows is that if HIR node A always starts/ends executing
//! before HIR node B, then A appears in traversal pre/postorder before B,
//! respectively. (This follows from RPO respecting CFG domination).
//!
//! This order consistency is required in a few places in rustc, for
//! example generator inference, and possibly also HIR borrowck.
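//!
//! A minimal sketch of a visitor that walks nested bodies and counts
//! expressions (`ExprCounter` and `count` are illustrative names, not part
//! of this module):
//!
//! ```ignore
//! struct ExprCounter<'a, 'tcx: 'a> { map: &'a Map<'tcx>, count: usize }
//!
//! impl<'a, 'tcx> Visitor<'tcx> for ExprCounter<'a, 'tcx> {
//!     fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> {
//!         NestedVisitorMap::OnlyBodies(self.map)
//!     }
//!     fn visit_expr(&mut self, ex: &'tcx Expr) {
//!         self.count += 1; // count this expression
//!         walk_expr(self, ex); // then keep the default traversal
//!     }
//! }
//! ```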
use syntax::ast::{NodeId, CRATE_NODE_ID, Ident, Name, Attribute};
use syntax_pos::Span;
use hir::*;
use hir::def::Def;
use hir::map::{self, Map};
use super::itemlikevisit::DeepVisitor;
use std::cmp;
use std::u32;
use std::result::Result::Err;
#[derive(Copy, Clone)]
pub enum FnKind<'a> {
/// #[xxx] pub async/const/extern "Abi" fn foo()
ItemFn(Name, &'a Generics, FnHeader, &'a Visibility, &'a [Attribute]),
/// fn foo(&self)
Method(Ident, &'a MethodSig, Option<&'a Visibility>, &'a [Attribute]),
/// |x, y| {}
Closure(&'a [Attribute]),
}
impl<'a> FnKind<'a> {
pub fn attrs(&self) -> &'a [Attribute] {
match *self {
FnKind::ItemFn(.., attrs) => attrs,
FnKind::Method(.., attrs) => attrs,
FnKind::Closure(attrs) => attrs,
}
}
}
/// Specifies what nested things a visitor wants to visit. The most
/// common choice is `OnlyBodies`, which will cause the visitor to
/// visit fn bodies for fns that it encounters, but skip over nested
/// item-like things.
///
/// See the comments on `ItemLikeVisitor` for more details on the overall
/// visit strategy.
pub enum NestedVisitorMap<'this, 'tcx: 'this> {
/// Do not visit any nested things. When you add a new
/// "non-nested" thing, you will want to audit such uses to see if
/// they remain valid.
///
/// Use this if you are only walking some particular kind of tree
/// (i.e., a type, or fn signature) and you don't want to thread a
/// HIR map around.
None,
/// Do not visit nested item-like things, but visit nested things
/// that are inside of an item-like.
///
/// **This is the most common choice.** A very common pattern is
/// to use `visit_all_item_likes()` as an outer loop,
/// and to have that visitor use this setting when visiting
/// the contents of each item.
OnlyBodies(&'this Map<'tcx>),
/// Visit all nested things, including item-likes.
///
/// **This is an unusual choice.** It is used when you want to
/// process everything within their lexical context. Typically you
/// kick off the visit by doing `walk_krate()`.
All(&'this Map<'tcx>),
}
impl<'this, 'tcx> NestedVisitorMap<'this, 'tcx> {
/// Returns the map to use for an "intra item-like" thing (if any).
/// e.g., function body.
pub fn intra(self) -> Option<&'this Map<'tcx>> {
match self {
NestedVisitorMap::None => None,
NestedVisitorMap::OnlyBodies(map) => Some(map),
NestedVisitorMap::All(map) => Some(map),
}
}
/// Returns the map to use for an "item-like" thing (if any).
/// e.g., item, impl-item.
pub fn inter(self) -> Option<&'this Map<'tcx>> {
match self {
NestedVisitorMap::None => None,
NestedVisitorMap::OnlyBodies(_) => None,
NestedVisitorMap::All(map) => Some(map),
}
}
}
/// Each method of the Visitor trait is a hook to be potentially
/// overridden. Each method's default implementation recursively visits
/// the substructure of the input via the corresponding `walk` method;
/// e.g. the `visit_mod` method by default calls `intravisit::walk_mod`.
///
/// Note that this visitor does NOT visit nested items by default
/// (this is why the module is called `intravisit`, to distinguish it
/// from the AST's `visit` module, which acts differently). If you
/// simply want to visit all items in the crate in some order, you
/// should call `Crate::visit_all_items`. Otherwise, see the comment
/// on `visit_nested_item` for details on how to visit nested items.
///
/// If you want to ensure that your code handles every variant
/// explicitly, you need to override each method. (And you also need
/// to monitor future changes to `Visitor` in case a new method with a
/// new default implementation gets introduced.)
pub trait Visitor<'v> : Sized {
///////////////////////////////////////////////////////////////////////////
// Nested items.
/// The default versions of the `visit_nested_XXX` routines invoke
/// this method to get a map to use. By selecting an enum variant,
/// you control which kinds of nested HIR are visited; see
/// `NestedVisitorMap` for details. By "nested HIR", we are
/// referring to bits of HIR that are not directly embedded within
/// one another but rather indirectly, through a table in the
/// crate. This is done to control dependencies during incremental
/// compilation: the non-inline bits of HIR can be tracked and
/// hashed separately.
///
/// **If for some reason you want the nested behavior, but don't
/// have a `Map` at your disposal:** then you should override the
/// `visit_nested_XXX` methods, and override this method to
/// `panic!()`. This way, if a new `visit_nested_XXX` variant is
/// added in the future, we will see the panic in your code and
/// fix it appropriately.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v>;
/// Invoked when a nested item is encountered. By default does
/// nothing unless you override `nested_visit_map` to return
/// `Some(_)`, in which case it will walk the item. **You probably
/// don't want to override this method** -- instead, override
/// `nested_visit_map` or use the "shallow" or "deep" visit
/// patterns described on `itemlikevisit::ItemLikeVisitor`. The only
/// reason to override this method is if you want a nested pattern
/// but cannot supply a `Map`; see `nested_visit_map` for advice.
#[allow(unused_variables)]
fn visit_nested_item(&mut self, id: ItemId) {
let opt_item = self.nested_visit_map().inter().map(|map| map.expect_item(id.id));
if let Some(item) = opt_item {
self.visit_item(item);
}
}
/// Like `visit_nested_item()`, but for trait items. See
/// `visit_nested_item()` for advice on when to override this
/// method.
#[allow(unused_variables)]
fn visit_nested_trait_item(&mut self, id: TraitItemId) {
let opt_item = self.nested_visit_map().inter().map(|map| map.trait_item(id));
if let Some(item) = opt_item {
self.visit_trait_item(item);
}
}
/// Like `visit_nested_item()`, but for impl items. See
/// `visit_nested_item()` for advice on when to override this
/// method.
#[allow(unused_variables)]
fn visit_nested_impl_item(&mut self, id: ImplItemId) {
let opt_item = self.nested_visit_map().inter().map(|map| map.impl_item(id));
if let Some(item) = opt_item {
self.visit_impl_item(item);
}
}
/// Invoked to visit the body of a function, method or closure. Like
/// visit_nested_item, does nothing by default unless you override
/// `nested_visit_map` to return `Some(_)`, in which case it will walk the
/// body.
fn visit_nested_body(&mut self, id: BodyId) {
let opt_body = self.nested_visit_map().intra().map(|map| map.body(id));
if let Some(body) = opt_body {
self.visit_body(body);
}
}
/// Visit the top-level item and (optionally) nested items / impl items. See
/// `visit_nested_item` for details.
fn visit_item(&mut self, i: &'v Item) {
walk_item(self, i)
}
fn visit_body(&mut self, b: &'v Body) {
walk_body(self, b);
}
/// When invoking `visit_all_item_likes()`, you need to supply an
/// item-like visitor. This method converts a "intra-visit"
/// visitor into an item-like visitor that walks the entire tree.
/// If you use this, you probably don't want to process the
/// contents of nested item-like things, since the outer loop will
/// visit them as well.
fn as_deep_visitor<'s>(&'s mut self) -> DeepVisitor<'s, Self> {
DeepVisitor::new(self)
}
///////////////////////////////////////////////////////////////////////////
fn visit_id(&mut self, _node_id: NodeId) {
// Nothing to do.
}
fn visit_def_mention(&mut self, _def: Def) {
// Nothing to do.
}
fn visit_name(&mut self, _span: Span, _name: Name) {
// Nothing to do.
}
fn visit_ident(&mut self, ident: Ident) {
walk_ident(self, ident)
}
fn visit_mod(&mut self, m: &'v Mod, _s: Span, n: NodeId) {
walk_mod(self, m, n)
}
fn visit_foreign_item(&mut self, i: &'v ForeignItem) {
walk_foreign_item(self, i)
}
fn visit_local(&mut self, l: &'v Local) {
walk_local(self, l)
}
fn visit_block(&mut self, b: &'v Block) {
walk_block(self, b)
}
fn visit_stmt(&mut self, s: &'v Stmt) {
walk_stmt(self, s)
}
fn visit_arm(&mut self, a: &'v Arm) {
walk_arm(self, a)
}
fn visit_pat(&mut self, p: &'v Pat) {
walk_pat(self, p)
}
fn visit_decl(&mut self, d: &'v Decl) {
walk_decl(self, d)
}
fn visit_anon_const(&mut self, c: &'v AnonConst) {
walk_anon_const(self, c)
}
fn visit_expr(&mut self, ex: &'v Expr) {
walk_expr(self, ex)
}
fn visit_ty(&mut self, t: &'v Ty) {
walk_ty(self, t)
}
fn visit_generic_param(&mut self, p: &'v GenericParam) {
walk_generic_param(self, p)
}
fn visit_generics(&mut self, g: &'v Generics) {
walk_generics(self, g)
}
fn visit_where_predicate(&mut self, predicate: &'v WherePredicate) {
walk_where_predicate(self, predicate)
}
fn visit_fn_decl(&mut self, fd: &'v FnDecl) {
walk_fn_decl(self, fd)
}
fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: BodyId, s: Span, id: NodeId) {
walk_fn(self, fk, fd, b, s, id)
}
fn visit_trait_item(&mut self, ti: &'v TraitItem) {
walk_trait_item(self, ti)
}
fn visit_trait_item_ref(&mut self, ii: &'v TraitItemRef) {
walk_trait_item_ref(self, ii)
}
fn visit_impl_item(&mut self, ii: &'v ImplItem) {
walk_impl_item(self, ii)
}
fn visit_impl_item_ref(&mut self, ii: &'v ImplItemRef) {
walk_impl_item_ref(self, ii)
}
fn visit_trait_ref(&mut self, t: &'v TraitRef) {
walk_trait_ref(self, t)
}
fn visit_param_bound(&mut self, bounds: &'v GenericBound) {
walk_param_bound(self, bounds)
}
fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef, m: TraitBoundModifier) {
walk_poly_trait_ref(self, t, m)
}
fn visit_variant_data(&mut self,
s: &'v VariantData,
_: Name,
_: &'v Generics,
_parent_id: NodeId,
_: Span) {
walk_struct_def(self, s)
}
fn visit_struct_field(&mut self, s: &'v StructField) {
walk_struct_field(self, s)
}
fn visit_enum_def(&mut self,
enum_definition: &'v EnumDef,
generics: &'v Generics,
item_id: NodeId,
_: Span) {
walk_enum_def(self, enum_definition, generics, item_id)
}
fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) {
walk_variant(self, v, g, item_id)
}
fn visit_label(&mut self, label: &'v Label) {
walk_label(self, label)
}
fn visit_generic_arg(&mut self, generic_arg: &'v GenericArg) {
match generic_arg {
GenericArg::Lifetime(lt) => self.visit_lifetime(lt),
GenericArg::Type(ty) => self.visit_ty(ty),
}
}
fn visit_lifetime(&mut self, lifetime: &'v Lifetime) {
walk_lifetime(self, lifetime)
}
fn visit_qpath(&mut self, qpath: &'v QPath, id: HirId, span: Span) {
walk_qpath(self, qpath, id, span)
}
fn visit_path(&mut self, path: &'v Path, _id: HirId) {
walk_path(self, path)
}
fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment) {
walk_path_segment(self, path_span, path_segment)
}
fn visit_generic_args(&mut self, path_span: Span, generic_args: &'v GenericArgs) {
walk_generic_args(self, path_span, generic_args)
}
fn | (&mut self, type_binding: &'v TypeBinding) {
walk_assoc_type_binding(self, type_binding)
}
fn visit_attribute(&mut self, _attr: &'v Attribute) {
}
fn visit_macro_def(&mut self, macro_def: &'v MacroDef) {
walk_macro_def(self, macro_def)
}
fn visit_vis(&mut self, vis: &'v Visibility) {
walk_vis(self, vis)
}
fn visit_associated_item_kind(&mut self, kind: &'v AssociatedItemKind) {
walk_associated_item_kind(self, kind);
}
fn visit_defaultness(&mut self, defaultness: &'v Defaultness) {
walk_defaultness(self, defaultness);
}
}
/// Walks the contents of a crate. See also `Crate::visit_all_items`.
pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) {
visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID);
walk_list!(visitor, visit_attribute, &krate.attrs);
walk_list!(visitor, visit_macro_def, &krate.exported_macros);
}
pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) {
visitor.visit_id(macro_def.id);
visitor.visit_name(macro_def.span, macro_def.name);
walk_list!(visitor, visit_attribute, &macro_def.attrs);
}
pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod, mod_node_id: NodeId) {
visitor.visit_id(mod_node_id);
for &item_id in &module.item_ids {
visitor.visit_nested_item(item_id);
}
}
pub fn walk_body<'v, V: Visitor<'v>>(visitor: &mut V, body: &'v Body) {
for argument in &body.arguments {
visitor.visit_id(argument.id);
visitor.visit_pat(&argument.pat);
}
visitor.visit_expr(&body.value);
}
pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) {
// Intentionally visiting the expr first - the initialization expr
// dominates the local's definition.
walk_list!(visitor, visit_expr, &local.init);
walk_list!(visitor, visit_attribute, local.attrs.iter());
visitor.visit_id(local.id);
visitor.visit_pat(&local.pat);
walk_list!(visitor, visit_ty, &local.ty);
}
pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) {
visitor.visit_name(ident.span, ident.name);
}
pub fn walk_label<'v, V: Visitor<'v>>(visitor: &mut V, label: &'v Label) {
visitor.visit_ident(label.ident);
}
pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) {
visitor.visit_id(lifetime.id);
match lifetime.name {
LifetimeName::Param(ParamName::Plain(ident)) => {
visitor.visit_ident(ident);
}
LifetimeName::Param(ParamName::Fresh(_)) |
LifetimeName::Static |
LifetimeName::Implicit |
LifetimeName::Underscore => {}
}
}
pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V,
trait_ref: &'v PolyTraitRef,
_modifier: TraitBoundModifier)
where V: Visitor<'v>
{
walk_list!(visitor, visit_generic_param, &trait_ref.bound_generic_params);
visitor.visit_trait_ref(&trait_ref.trait_ref);
}
pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef)
where V: Visitor<'v>
{
visitor.visit_id(trait_ref.ref_id);
visitor.visit_path(&trait_ref.path, trait_ref.hir_ref_id)
}
pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) {
visitor.visit_vis(&item.vis);
visitor.visit_name(item.span, item.name);
match item.node {
ItemKind::ExternCrate(orig_name) => {
visitor.visit_id(item.id);
if let Some(orig_name) = orig_name {
visitor.visit_name(item.span, orig_name);
}
}
ItemKind::Use(ref path, _) => {
visitor.visit_id(item.id);
visitor.visit_path(path, item.hir_id);
}
ItemKind::Static(ref typ, _, body) |
ItemKind::Const(ref typ, body) => {
visitor.visit_id(item.id);
visitor.visit_ty(typ);
visitor.visit_nested_body(body);
}
ItemKind::Fn(ref declaration, header, ref generics, body_id) => {
visitor.visit_fn(FnKind::ItemFn(item.name,
generics,
header,
&item.vis,
&item.attrs),
declaration,
body_id,
item.span,
item.id)
}
ItemKind::Mod(ref module) => {
// visit_mod() takes care of visiting the Item's NodeId
visitor.visit_mod(module, item.span, item.id)
}
ItemKind::ForeignMod(ref foreign_module) => {
visitor.visit_id(item.id);
walk_list!(visitor, visit_foreign_item, &foreign_module.items);
}
ItemKind::GlobalAsm(_) => {
visitor.visit_id(item.id);
}
ItemKind::Ty(ref typ, ref type_parameters) => {
visitor.visit_id(item.id);
visitor.visit_ty(typ);
visitor.visit_generics(type_parameters)
}
ItemKind::Existential(ExistTy {ref generics, ref bounds, impl_trait_fn}) => {
visitor.visit_id(item.id);
walk_generics(visitor, generics);
walk_list!(visitor, visit_param_bound, bounds);
if let Some(impl_trait_fn) = impl_trait_fn {
visitor.visit_def_mention(Def::Fn(impl_trait_fn))
}
}
ItemKind::Enum(ref enum_definition, ref type_parameters) => {
visitor.visit_generics(type_parameters);
// visit_enum_def() takes care of visiting the Item's NodeId
visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span)
}
ItemKind::Impl(
..,
ref type_parameters,
ref opt_trait_reference,
ref typ,
ref impl_item_refs
) => {
visitor.visit_id(item.id);
visitor.visit_generics(type_parameters);
walk_list!(visitor, visit_trait_ref, opt_trait_reference);
visitor.visit_ty(typ);
walk_list!(visitor, visit_impl_item_ref, impl_item_refs);
}
ItemKind::Struct(ref struct_definition, ref generics) |
ItemKind::Union(ref struct_definition, ref generics) => {
visitor.visit_generics(generics);
visitor.visit_id(item.id);
visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span);
}
ItemKind::Trait(.., ref generics, ref bounds, ref trait_item_refs) => {
visitor.visit_id(item.id);
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_trait_item_ref, trait_item_refs);
}
ItemKind::TraitAlias(ref generics, ref bounds) => {
visitor.visit_id(item.id);
visitor.visit_generics(generics);
walk_list!(visitor, visit_param_bound, bounds);
}
}
walk_list!(visitor, visit_attribute, &item.attrs);
}
pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V,
enum_definition: &'v EnumDef,
generics: &'v Generics,
item_id: NodeId) {
visitor.visit_id(item_id);
walk_list!(visitor,
visit_variant,
&enum_definition.variants,
generics,
item_id);
}
pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V,
variant: &'v Variant,
generics: &'v Generics,
parent_item_id: NodeId) {
visitor.visit_name(variant.span, variant.node.name);
visitor.visit_variant_data(&variant.node.data,
variant.node.name,
generics,
parent_item_id,
variant.span);
walk_list!(visitor, visit_anon_const, &variant.node.disr_expr);
walk_list!(visitor, visit_attribute, &variant.node.attrs);
}
pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) {
visitor.visit_id(typ.id);
match typ.node {
TyKind::Slice(ref ty) => {
visitor.visit_ty(ty)
}
TyKind::Ptr(ref mutable_type) => {
visitor.visit_ty(&mutable_type.ty)
}
TyKind::Rptr(ref lifetime, ref mutable_type) => {
visitor.visit_lifetime(lifetime);
visitor.visit_ty(&mutable_type.ty)
}
TyKind::Never => {},
TyKind::Tup(ref tuple_element_types) => {
walk_list!(visitor, visit_ty, tuple_element_types);
}
TyKind::BareFn(ref function_declaration) => {
walk_list!(visitor, visit_generic_param, &function_declaration.generic_params);
visitor.visit_fn_decl(&function_declaration.decl);
}
TyKind::Path(ref qpath) => {
visitor.visit_qpath(qpath, typ.hir_id, typ.span);
}
TyKind::Array(ref ty, ref length) => {
visitor.visit_ty(ty);
visitor.visit_anon_const(length)
}
TyKind::TraitObject(ref bounds, ref lifetime) => {
for bound in bounds {
visitor.visit_poly_trait_ref(bound, TraitBoundModifier::None);
}
visitor.visit_lifetime(lifetime);
}
TyKind::Typeof(ref expression) => {
visitor.visit_anon_const(expression)
}
TyKind::Infer | TyKind::Err => {}
}
}
pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V, qpath: &'v QPath, id: HirId, span: Span) {
match *qpath {
QPath::Resolved(ref maybe_qself, ref path) => {
if let Some(ref qself) = *maybe_qself {
visitor.visit_ty(qself);
}
visitor.visit_path(path, id)
}
QPath::TypeRelative(ref qself, ref segment) => {
visitor.visit_ty(qself);
visitor.visit_path_segment(span, segment);
}
}
}
pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) {
visitor.visit_def_mention(path.def);
for segment in &path.segments {
visitor.visit_path_segment(path.span, segment);
}
}
pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V,
path_span: Span,
segment: &'v PathSegment) {
visitor.visit_ident(segment.ident);
if let Some(ref args) = segment.args {
visitor.visit_generic_args(path_span, args);
}
}
pub fn walk_generic_args<'v, V: Visitor<'v>>(visitor: &mut V,
_path_span: Span,
generic_args: &'v GenericArgs) {
walk_list!(visitor, visit_generic_arg, &generic_args.args);
walk_list!(visitor, visit_assoc_type_binding, &generic_args.bindings);
}
pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V,
type_binding: &'v TypeBinding) {
visitor.visit_id(type_binding.id);
visitor.visit_ident(type_binding.ident);
visitor.visit_ty(&type_binding.ty);
}
pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) {
visitor.visit_id(pattern.id);
match pattern.node {
PatKind::TupleStruct(ref qpath, ref children, _) => {
visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
walk_list!(visitor, visit_pat, children);
}
PatKind::Path(ref qpath) => {
visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
}
PatKind::Struct(ref qpath, ref fields, _) => {
visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
for field in fields {
visitor.visit_id(field.node.id);
visitor.visit_ident(field.node.ident);
visitor.visit_pat(&field.node.pat)
}
}
PatKind::Tuple(ref tuple_elements, _) => {
walk_list!(visitor, visit_pat, tuple_elements);
}
PatKind::Box(ref subpattern) |
PatKind::Ref(ref subpattern, _) => {
visitor.visit_pat(subpattern)
}
PatKind::Binding(_, canonical_id, ident, ref optional_subpattern) => {
visitor.visit_def_mention(Def::Local(canonical_id));
visitor.visit_ident(ident);
walk_list!(visitor, visit_pat, optional_subpattern);
}
PatKind::Lit(ref expression) => visitor.visit_expr(expression),
PatKind::Range(ref lower_bound, ref upper_bound, _) => {
visitor.visit_expr(lower_bound);
visitor.visit_expr(upper_bound)
}
PatKind::Wild => (),
PatKind::Slice(ref prepatterns, ref slice_pattern, ref postpatterns) => {
walk_list!(visitor, visit_pat, prepatterns);
walk_list!(visitor, visit_pat, slice_pattern);
walk_list!(visitor, visit_pat, postpatterns);
}
}
}
pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) {
visitor.visit_id(foreign_item.id);
visitor.visit_vis(&foreign_item.vis);
visitor.visit_name(foreign_item.span, foreign_item.name);
match foreign_item.node {
ForeignItemKind::Fn(ref function_declaration, ref param_names, ref generics) => {
visitor.visit_generics(generics);
visitor.visit_fn_decl(function_declaration);
for &param_name in param_names {
visitor.visit_ident(param_name);
}
}
ForeignItemKind::Static(ref typ, _) => visitor.visit_ty(typ),
ForeignItemKind::Type => (),
}
walk_list!(visitor, visit_attribute, &foreign_item.attrs);
}
pub fn walk_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v GenericBound) {
match *bound {
GenericBound::Trait(ref typ, modifier) => {
visitor.visit_poly_trait_ref(typ, modifier);
}
GenericBound::Outlives(ref lifetime) => visitor.visit_lifetime(lifetime),
}
}
pub fn walk_generic_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v GenericParam) {
visitor.visit_id(param.id);
walk_list!(visitor, visit_attribute, &param.attrs);
match param.name {
ParamName::Plain(ident) => visitor.visit_ident(ident),
ParamName::Fresh(_) => {}
}
match param.kind {
GenericParamKind::Lifetime { .. } => {}
GenericParamKind::Type { ref default, .. } => walk_list!(visitor, visit_ty, default),
}
walk_list!(visitor, visit_param_bound, &param.bounds);
}
pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) {
walk_list!(visitor, visit_generic_param, &generics.params);
visitor.visit_id(generics.where_clause.id);
walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates);
}
pub fn walk_where_predicate<'v, V: Visitor<'v>>(
visitor: &mut V,
predicate: &'v WherePredicate)
{
match predicate {
&WherePredicate::BoundPredicate(WhereBoundPredicate{ref bounded_ty,
ref bounds,
ref bound_generic_params,
..}) => {
visitor.visit_ty(bounded_ty);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_generic_param, bound_generic_params);
}
&WherePredicate::RegionPredicate(WhereRegionPredicate{ref lifetime,
ref bounds,
..}) => {
visitor.visit_lifetime(lifetime);
walk_list!(visitor, visit_param_bound, bounds);
}
&WherePredicate::EqPredicate(WhereEqPredicate{id,
ref lhs_ty,
ref rhs_ty,
..}) => {
visitor.visit_id(id);
visitor.visit_ty(lhs_ty);
visitor.visit_ty(rhs_ty);
}
}
}
pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FunctionRetTy) {
if let Return(ref output_ty) = *ret_ty {
visitor.visit_ty(output_ty)
}
}
pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl) {
for ty in &function_declaration.inputs {
visitor.visit_ty(ty)
}
walk_fn_ret_ty(visitor, &function_declaration.output)
}
pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) {
match function_kind {
FnKind::ItemFn(_, generics, ..) => {
visitor.visit_generics(generics);
}
FnKind::Method(..) |
FnKind::Closure(_) => {}
}
}
pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V,
function_kind: FnKind<'v>,
function_declaration: &'v FnDecl,
body_id: BodyId,
_span: Span,
id: NodeId) {
visitor.visit_id(id);
visitor.visit_fn_decl(function_declaration);
walk_fn_kind(visitor, function_kind);
visitor.visit_nested_body(body_id)
}
pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem) {
visitor.visit_ident(trait_item.ident);
walk_list!(visitor, visit_attribute, &trait_item.attrs);
visitor.visit_generics(&trait_item.generics);
match trait_item.node {
TraitItemKind::Const(ref ty, default) => {
visitor.visit_id(trait_item.id);
visitor.visit_ty(ty);
walk_list!(visitor, visit_nested_body, default);
}
TraitItemKind::Method(ref sig, TraitMethod::Required(ref param_names)) => {
visitor.visit_id(trait_item.id);
visitor.visit_fn_decl(&sig.decl);
for &param_name in param_names {
visitor.visit_ident(param_name);
}
}
TraitItemKind::Method(ref sig, TraitMethod::Provided(body_id)) => {
visitor.visit_fn(FnKind::Method(trait_item.ident,
sig,
None,
&trait_item.attrs),
&sig.decl,
body_id,
trait_item.span,
trait_item.id);
}
TraitItemKind::Type(ref bounds, ref default) => {
visitor.visit_id(trait_item.id);
walk_list!(visitor, visit_param_bound, bounds);
walk_list!(visitor, visit_ty, default);
}
}
}
pub fn walk_trait_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_item_ref: &'v TraitItemRef) {
// NB: Deliberately force a compilation error if/when new fields are added.
let TraitItemRef { id, ident, ref kind, span: _, ref defaultness } = *trait_item_ref;
visitor.visit_nested_trait_item(id);
visitor.visit_ident(ident);
visitor.visit_associated_item_kind(kind);
visitor.visit_defaultness(defaultness);
}
pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem) {
// NB: Deliberately force a compilation error if/when new fields are added.
let ImplItem {
id: _,
hir_id: _,
ident,
ref vis,
ref defaultness,
ref attrs,
ref generics,
ref node,
span: _,
} = *impl_item;
visitor.visit_ident(ident);
visitor.visit_vis(vis);
visitor.visit_defaultness(defaultness);
walk_list!(visitor, visit_attribute, attrs);
visitor.visit_generics(generics);
match *node {
ImplItemKind::Const(ref ty, body) => {
visitor.visit_id(impl_item.id);
visitor.visit_ty(ty);
visitor.visit_nested_body(body);
}
ImplItemKind::Method(ref sig, body_id) => {
visitor.visit_fn(FnKind::Method(impl_item.ident,
sig,
Some(&impl_item.vis),
&impl_item.attrs),
&sig.decl,
body_id,
impl_item.span,
impl_item.id);
}
ImplItemKind::Type(ref ty) => {
visitor.visit_id(impl_item.id);
visitor.visit_ty(ty);
}
ImplItemKind::Existential(ref bounds) => {
visitor.visit_id(impl_item.id);
walk_list!(visitor, visit_param_bound, bounds);
}
}
}
pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &'v ImplItemRef) {
// NB: Deliberately force a compilation error if/when new fields are added.
let ImplItemRef { id, ident, ref kind, span: _, ref vis, ref defaultness } = *impl_item_ref;
visitor.visit_nested_impl_item(id);
visitor.visit_ident(ident);
visitor.visit_associated_item_kind(kind);
visitor.visit_vis(vis);
visitor.visit_defaultness(defaultness);
}
pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) {
visitor.visit_id(struct_definition.id());
walk_list!(visitor, visit_struct_field, struct_definition.fields());
}
pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) {
visitor.visit_id(struct_field.id);
visitor.visit_vis(&struct_field.vis);
visitor.visit_ident(struct_field.ident);
visitor.visit_ty(&struct_field.ty);
walk_list!(visitor, visit_attribute, &struct_field.attrs);
}
pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) {
visitor.visit_id(block.id);
walk_list!(visitor, visit_stmt, &block.stmts);
walk_list!(visitor, visit_expr, &block.expr);
}
pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) {
match statement.node {
StmtKind::Decl(ref declaration, id) => {
visitor.visit_id(id);
visitor.visit_decl(declaration)
}
StmtKind::Expr(ref expression, id) |
StmtKind::Semi(ref expression, id) => {
visitor.visit_id(id);
visitor.visit_expr(expression)
}
}
}
pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) {
match declaration.node {
DeclKind::Local(ref local) => visitor.visit_local(local),
DeclKind::Item(item) => visitor.visit_nested_item(item),
}
}
pub fn walk_anon_const<'v, V: Visitor<'v>>(visitor: &mut V, constant: &'v AnonConst) {
visitor.visit_id(constant.id);
visitor.visit_nested_body(constant.body);
}
pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
visitor.visit_id(expression.id);
walk_list!(visitor, visit_attribute, expression.attrs.iter());
match expression.node {
ExprKind::Box(ref subexpression) => {
visitor.visit_expr(subexpression)
}
ExprKind::Array(ref subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
ExprKind::Repeat(ref element, ref count) => {
visitor.visit_expr(element);
visitor.visit_anon_const(count)
}
ExprKind::Struct(ref qpath, ref fields, ref optional_base) => {
visitor.visit_qpath(qpath, expression.hir_id, expression.span);
for field in fields {
visitor.visit_id(field.id);
visitor.visit_ident(field.ident);
visitor.visit_expr(&field.expr)
}
walk_list!(visitor, visit_expr, optional_base);
}
ExprKind::Tup(ref subexpressions) => {
walk_list!(visitor, visit_expr, subexpressions);
}
ExprKind::Call(ref callee_expression, ref arguments) => {
visitor.visit_expr(callee_expression);
walk_list!(visitor, visit_expr, arguments);
}
ExprKind::MethodCall(ref segment, _, ref arguments) => {
visitor.visit_path_segment(expression.span, segment);
walk_list!(visitor, visit_expr, arguments);
}
ExprKind::Binary(_, ref left_expression, ref right_expression) => {
visitor.visit_expr(left_expression);
visitor.visit_expr(right_expression)
}
ExprKind::AddrOf(_, ref subexpression) | ExprKind::Unary(_, ref subexpression) => {
visitor.visit_expr(subexpression)
}
ExprKind::Lit(_) => {}
ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => {
visitor.visit_expr(subexpression);
visitor.visit_ty(typ)
}
ExprKind::If(ref head_expression, ref if_block, ref optional_else) => {
visitor.visit_expr(head_expression);
visitor.visit_expr(if_block);
walk_list!(visitor, visit_expr, optional_else);
}
ExprKind::While(ref subexpression, ref block, ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_expr(subexpression);
visitor.visit_block(block);
}
ExprKind::Loop(ref block, ref opt_label, _) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_block(block);
}
ExprKind::Match(ref subexpression, ref arms, _) => {
visitor.visit_expr(subexpression);
walk_list!(visitor, visit_arm, arms);
}
ExprKind::Closure(_, ref function_declaration, body, _fn_decl_span, _gen) => {
visitor.visit_fn(FnKind::Closure(&expression.attrs),
function_declaration,
body,
expression.span,
expression.id)
}
ExprKind::Block(ref block, ref opt_label) => {
walk_list!(visitor, visit_label, opt_label);
visitor.visit_block(block);
}
ExprKind::Assign(ref left_hand_expression, ref right_hand_expression) => {
visitor.visit_expr(right_hand_expression);
visitor.visit_expr(left_hand_expression)
}
ExprKind::AssignOp(_, ref left_expression, ref right_expression) => {
visitor.visit_expr(right_expression);
visitor.visit_expr(left_expression)
}
ExprKind::Field(ref subexpression, ident) => {
visitor.visit_expr(subexpression);
visitor.visit_ident(ident);
}
ExprKind::Index(ref main_expression, ref index_expression) => {
visitor.visit_expr(main_expression);
visitor.visit_expr(index_expression)
}
ExprKind::Path(ref qpath) => {
visitor.visit_qpath(qpath, expression.hir_id, expression.span);
}
ExprKind::Break(ref destination, ref opt_expr) => {
if let Some(ref label) = destination.label {
visitor.visit_label(label);
match destination.target_id {
Ok(node_id) => visitor.visit_def_mention(Def::Label(node_id)),
Err(_) => {},
};
}
walk_list!(visitor, visit_expr, opt_expr);
}
ExprKind::Continue(ref destination) => {
if let Some(ref label) = destination.label {
visitor.visit_label(label);
match destination.target_id {
Ok(node_id) => visitor.visit_def_mention(Def::Label(node_id)),
Err(_) => {},
};
}
}
ExprKind::Ret(ref optional_expression) => {
walk_list!(visitor, visit_expr, optional_expression);
}
ExprKind::InlineAsm(_, ref outputs, ref inputs) => {
for output in outputs {
visitor.visit_expr(output)
}
for input in inputs {
visitor.visit_expr(input)
}
}
ExprKind::Yield(ref subexpression) => {
visitor.visit_expr(subexpression);
}
}
}
pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) {
walk_list!(visitor, visit_pat, &arm.pats);
walk_list!(visitor, visit_expr, &arm.guard);
visitor.visit_expr(&arm.body);
walk_list!(visitor, visit_attribute, &arm.attrs);
}
pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility) {
if let VisibilityKind::Restricted { ref path, id, hir_id } = vis.node {
visitor.visit_id(id);
visitor.visit_path(path, hir_id)
}
}
pub fn walk_associated_item_kind<'v, V: Visitor<'v>>(_: &mut V, _: &'v AssociatedItemKind) {
// No visitable content here: this fn exists so you can call it if
// the right thing to do, should content be added in the future,
// would be to walk it.
}
pub fn walk_defaultness<'v, V: Visitor<'v>>(_: &mut V, _: &'v Defaultness) {
// No visitable content here: this fn exists so you can call it if
// the right thing to do, should content be added in the future,
// would be to walk it.
}
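// Illustrative sketch (hypothetical, not from the rustc source): these
// empty walk fns keep the override pattern uniform. A visitor that
// overrides `visit_defaultness` can still delegate to the walk fn, so it
// keeps working if visitable content is ever added to `Defaultness`:
//
//     fn visit_defaultness(&mut self, d: &'v Defaultness) {
//         // ...custom handling here...
//         walk_defaultness(self, d); // currently a no-op, future-proof
//     }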
#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)]
pub struct IdRange {
pub min: NodeId,
pub max: NodeId,
}
impl IdRange {
pub fn max() -> IdRange {
IdRange {
min: NodeId::from_u32(u32::MAX),
max: NodeId::from_u32(u32::MIN),
}
}
pub fn empty(&self) -> bool {
self.min >= self.max
}
pub fn contains(&self, id: NodeId) -> bool {
id >= self.min && id < self.max
}
pub fn add(&mut self, id: NodeId) {
self.min = cmp::min(self.min, id);
self.max = cmp::max(self.max, NodeId::from_u32(id.as_u32() + 1));
}
}
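// Illustrative note (hypothetical, not from the rustc source): `IdRange`
// is a half-open interval `[min, max)`. `IdRange::max()` returns the
// inverted extreme (min at u32::MAX, max at u32::MIN), so the range
// starts out `empty()` and each `add(id)` grows it to cover `id`:
//
//     let mut r = IdRange::max();
//     assert!(r.empty());
//     r.add(NodeId::from_u32(5));              // min = 5, max = 6
//     assert!(r.contains(NodeId::from_u32(5)));
//     assert!(!r.contains(NodeId::from_u32(6))); // upper bound exclusive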
pub struct IdRangeComputingVisitor<'a, 'hir: 'a> {
result: IdRange,
map: &'a map::Map<'hir>,
}
impl<'a, 'hir> IdRangeComputingVisitor<'a, 'hir> {
pub fn new(map: &'a map::Map<'hir>) -> IdRangeComputingVisitor<'a, 'hir> {
IdRangeComputingVisitor { result: IdRange::max(), map: map }
}
pub fn result(&self) -> IdRange {
self.result
}
}
impl<'a, 'hir> Visitor<'hir> for IdRangeComputingVisitor<'a, 'hir> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'hir> {
NestedVisitorMap::OnlyBodies(&self.map)
}
fn visit_id(&mut self, id: NodeId) {
self.result.add(id);
}
}
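// Illustrative sketch (hypothetical, not from the rustc source): driving
// the visitor above to compute the NodeId range of an arbitrary HIR
// subtree. `hir_map` and `some_expr` are assumed stand-ins for a
// `map::Map` and an `Expr` obtained from the compiler context:
//
//     let mut visitor = IdRangeComputingVisitor::new(&hir_map);
//     visitor.visit_expr(&some_expr);
//     let range = visitor.result();
//     assert!(range.contains(some_expr.id));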