admin-layout.routing.ts
import { Routes } from '@angular/router';
import { HomeComponent } from '../../home/home.component';
import { UserComponent } from '../../user/user.component';
import { TablesComponent } from '../../tables/tables.component';
import { IconsComponent } from '../../icons/icons.component';
import { MapsComponent } from '../../maps/maps.component';
import { NotificationsComponent } from '../../notifications/notifications.component';
import { UpgradeComponent } from '../../upgrade/upgrade.component';
import { TypographyComponent } from '../../TraitementDesDemandes/typography.component';
export const AdminLayoutRoutes: Routes = [
{ path: 'dashboard', component: HomeComponent },
{ path: 'user', component: UserComponent },
{ path: 'table', component: TablesComponent },
{ path: 'typography', component: TypographyComponent },
{ path: 'icons', component: IconsComponent },
{ path: 'maps', component: MapsComponent },
{ path: 'notifications', component: NotificationsComponent },
{ path: 'upgrade', component: UpgradeComponent },
];
Nav.py
class MenuItem(object):
TEXT_NAME = 'name'
TEXT_URL = 'url_name'
TEXT_SUBMENU = 'submenu'
def __init__(self, name, url=None, *args):
super(MenuItem, self).__init__()
self.name = name
self.url = url
self.url_args = args
self.sub_menu = []
def add_sub_menu_item(self, name, url):
item = {self.TEXT_NAME: name, self.TEXT_URL: url}
self.sub_menu.append(item)
def __getitem__(self, key):
# Delegate to the dict form; `return self[key]` would recurse forever.
return self.to_text()[key]
def to_text(self):
output = {}
output[self.TEXT_NAME] = self.name
if self.url:
output[self.TEXT_URL] = self.url
if self.sub_menu:
output[self.TEXT_SUBMENU] = self.sub_menu
return output
class Nav:
def __init__(self, *args, **kwargs):
self.menu = []
def add_menu(self, menu):
self.menu.append(menu)
def get_menu_list(self):
output = []
for x in self.menu:
output.append(x.to_text())
return output
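# Usage sketch (illustrative, not part of the original module):
#
# nav = Nav()
# reports = MenuItem('Reports', 'reports-index')
# reports.add_sub_menu_item('Monthly', 'reports-monthly')
# nav.add_menu(reports)
# nav.get_menu_list()
# # => [{'name': 'Reports', 'url_name': 'reports-index',
# #      'submenu': [{'name': 'Monthly', 'url_name': 'reports-monthly'}]}]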
controlsvc.go
//go:build !no_controlsvc
// +build !no_controlsvc
package controlsvc
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"os"
"runtime"
"strings"
"sync"
"time"
"github.com/ansible/receptor/pkg/logger"
"github.com/ansible/receptor/pkg/netceptor"
"github.com/ansible/receptor/pkg/tls"
"github.com/ansible/receptor/pkg/utils"
"github.com/ghjm/cmdline"
)
// sockControl implements the ControlFuncOperations interface that is passed back to control functions.
type sockControl struct {
conn net.Conn
}
func (s *sockControl) RemoteAddr() net.Addr {
return s.conn.RemoteAddr()
}
// BridgeConn bridges the socket to another socket.
func (s *sockControl) BridgeConn(message string, bc io.ReadWriteCloser, bcName string) error {
if message != "" {
_, err := s.conn.Write([]byte(message))
if err != nil {
return err
}
}
utils.BridgeConns(s.conn, "control service", bc, bcName)
return nil
}
// ReadFromConn copies from the socket to an io.Writer, until EOF.
func (s *sockControl) ReadFromConn(message string, out io.Writer) error {
if message != "" {
_, err := s.conn.Write([]byte(message))
if err != nil {
return err
}
}
if _, err := io.Copy(out, s.conn); err != nil {
return err
}
return nil
}
// WriteToConn writes an initial string, and then messages to a channel, to the connection.
func (s *sockControl) WriteToConn(message string, in chan []byte) error {
if message != "" {
_, err := s.conn.Write([]byte(message))
if err != nil {
return err
}
}
for bytes := range in {
_, err := s.conn.Write(bytes)
if err != nil {
return err
}
}
return nil
}
func (s *sockControl) Close() error {
return s.conn.Close()
}
// Server is an instance of a control service.
type Server struct {
nc *netceptor.Netceptor
controlFuncLock sync.RWMutex
controlTypes map[string]ControlCommandType
}
// New returns a new instance of a control service.
func New(stdServices bool, nc *netceptor.Netceptor) *Server {
s := &Server{
nc: nc,
controlFuncLock: sync.RWMutex{},
controlTypes: make(map[string]ControlCommandType),
}
if stdServices {
s.controlTypes["ping"] = &pingCommandType{}
s.controlTypes["status"] = &statusCommandType{}
s.controlTypes["connect"] = &connectCommandType{}
s.controlTypes["traceroute"] = &tracerouteCommandType{}
s.controlTypes["reload"] = &reloadCommandType{}
}
return s
}
// MainInstance is the global instance of the control service instantiated by the command-line main() function.
var MainInstance *Server
// AddControlFunc registers a function that can be used from a control socket.
func (s *Server) AddControlFunc(name string, cType ControlCommandType) error {
s.controlFuncLock.Lock()
defer s.controlFuncLock.Unlock()
if _, ok := s.controlTypes[name]; ok {
return fmt.Errorf("control function named %s already exists", name)
}
s.controlTypes[name] = cType
return nil
}
// RunControlSession runs the server protocol on the given connection.
func (s *Server) RunControlSession(conn net.Conn) {
logger.Info("Client connected to control service %s\n", conn.RemoteAddr().String())
defer func() {
logger.Info("Client disconnected from control service %s\n", conn.RemoteAddr().String())
if conn != nil {
err := conn.Close()
if err != nil {
logger.Error("Error closing connection: %s\n", err)
}
}
}()
_, err := conn.Write([]byte(fmt.Sprintf("Receptor Control, node %s\n", s.nc.NodeID())))
if err != nil {
logger.Error("Write error in control service: %s\n", err)
return
}
done := false
for !done {
// Inefficiently read one line from the socket - we can't use bufio
// because we cannot read ahead beyond the newline character
cmdBytes := make([]byte, 0)
buf := make([]byte, 1)
for {
n, err := conn.Read(buf)
if err == io.EOF {
logger.Info("Control service closed\n")
done = true
break
} else if err != nil {
logger.Error("Read error in control service: %s\n", err)
return
}
if n == 1 {
if buf[0] == '\r' {
continue
} else if buf[0] == '\n' {
break
}
cmdBytes = append(cmdBytes, buf[0])
}
}
if len(cmdBytes) == 0 {
continue
}
var cmd string
var params string
var jsonData map[string]interface{}
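// Commands arrive in one of two forms: a JSON object whose "command" key
// names the control type, or a plain-text line of the form "command [params]".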
if cmdBytes[0] == '{' {
err = json.Unmarshal(cmdBytes, &jsonData)
if err == nil {
cmdIf, ok := jsonData["command"]
if ok {
cmd, ok = cmdIf.(string)
if !ok {
err = fmt.Errorf("command must be a string")
}
} else {
err = fmt.Errorf("JSON did not contain a command")
}
}
if err != nil {
_, err = conn.Write([]byte(fmt.Sprintf("ERROR: %s\n", err)))
if err != nil {
logger.Error("Write error in control service: %s\n", err)
return
}
}
} else {
tokens := strings.SplitN(string(cmdBytes), " ", 2)
if len(tokens) > 0 {
cmd = strings.ToLower(tokens[0])
if len(tokens) > 1 {
params = tokens[1]
}
}
}
s.controlFuncLock.RLock()
var ct ControlCommandType
for f := range s.controlTypes {
if f == cmd {
ct = s.controlTypes[f]
break
}
}
s.controlFuncLock.RUnlock()
if ct != nil {
cfo := &sockControl{
conn: conn,
}
var cfr map[string]interface{}
var cc ControlCommand
if jsonData == nil {
cc, err = ct.InitFromString(params)
} else {
cc, err = ct.InitFromJSON(jsonData)
}
if err == nil {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
cfr, err = cc.ControlFunc(ctx, s.nc, cfo)
}
if err != nil {
logger.Error(err.Error())
_, err = conn.Write([]byte(fmt.Sprintf("ERROR: %s\n", err)))
if err != nil {
logger.Error("Write error in control service: %s\n", err)
return
}
} else if cfr != nil {
rbytes, err := json.Marshal(cfr)
if err != nil {
_, err = conn.Write([]byte(fmt.Sprintf("ERROR: could not convert response to JSON: %s\n", err)))
if err != nil {
logger.Error("Write error in control service: %s\n", err)
return
}
}
rbytes = append(rbytes, '\n')
_, err = conn.Write(rbytes)
if err != nil {
logger.Error("Write error in control service: %s\n", err)
return
}
}
} else {
_, err = conn.Write([]byte("ERROR: Unknown command\n"))
if err != nil {
logger.Error("Write error in control service: %s\n", err)
return
}
}
}
}
// RunControlSvc runs the main accept loop of the control service.
func (s *Server) RunControlSvc(ctx context.Context, service string, tlscfg *tls.Config,
unixSocket string, unixSocketPermissions os.FileMode, tcpListen string, tcptls *tls.Config) error {
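// Up to three listeners may be created: a local Unix socket (uli), a TCP
// socket with optional TLS (tli), and a Receptor mesh service (li).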
var uli net.Listener
var lock *utils.FLock
var err error
if unixSocket != "" {
uli, lock, err = utils.UnixSocketListen(unixSocket, unixSocketPermissions)
if err != nil {
return fmt.Errorf("error opening Unix socket: %s", err)
}
} else {
uli = nil
}
var tli net.Listener
if tcpListen != "" {
var listenAddr string
if strings.Contains(tcpListen, ":") {
listenAddr = tcpListen
} else {
listenAddr = fmt.Sprintf("0.0.0.0:%s", tcpListen)
}
tli, err = net.Listen("tcp", listenAddr)
if err != nil {
return fmt.Errorf("error listening on TCP socket: %s", err)
}
if tcptls != nil {
tli = tls.NewListener(tli, tcptls)
}
} else {
tli = nil
}
var li *netceptor.Listener
if service != "" {
li, err = s.nc.ListenAndAdvertise(service, tlscfg, map[string]string{
"type": "Control Service",
})
if err != nil {
return fmt.Errorf("error opening Unix socket: %s", err)
}
} else {
li = nil
}
if uli == nil && tli == nil && li == nil {
return fmt.Errorf("no listeners specified")
}
logger.Info("Running control service %s\n", service)
go func() {
<-ctx.Done()
if uli != nil {
_ = uli.Close()
_ = lock.Unlock()
}
if li != nil {
_ = li.Close()
}
if tli != nil {
_ = tli.Close()
}
}()
// li is a concrete *netceptor.Listener, so it is nil-checked before being
// stored in the net.Listener interface; a typed nil in the interface would
// otherwise compare non-nil below.
listeners := make([]net.Listener, 0, 3)
if uli != nil {
listeners = append(listeners, uli)
}
if tli != nil {
listeners = append(listeners, tli)
}
if li != nil {
listeners = append(listeners, li)
}
for _, listener := range listeners {
go func(listener net.Listener) {
for {
conn, err := listener.Accept()
if ctx.Err() != nil {
return
}
if err != nil {
if strings.HasSuffix(err.Error(), "normal close") {
continue
}
logger.Error("Error accepting connection: %s. Closing listener.\n", err)
_ = listener.Close()
return
}
go func() {
defer conn.Close()
tlsConn, ok := conn.(*tls.Conn)
if ok {
// Explicitly run server TLS handshake so we can deal with timeout and errors here
err = conn.SetDeadline(time.Now().Add(10 * time.Second))
if err != nil {
logger.Error("Error setting timeout: %s. Closing socket.\n", err)
return
}
err = tlsConn.Handshake()
if err != nil {
logger.Error("TLS handshake error: %s. Closing socket.\n", err)
return
}
err = conn.SetDeadline(time.Time{})
if err != nil {
logger.Error("Error clearing timeout: %s. Closing socket.\n", err)
return
}
}
s.RunControlSession(conn)
}()
}
}(listener)
}
return nil
}
// **************************************************************************
// Command line
// **************************************************************************
// cmdlineConfigWindows is the cmdline configuration object for a control service on Windows.
type cmdlineConfigWindows struct {
Service string `description:"Receptor service name to listen on" default:"control"`
TLS string `description:"Name of TLS server config for the Receptor listener"`
TCPListen string `description:"Local TCP port or host:port to bind to the control service"`
TCPTLS string `description:"Name of TLS server config for the TCP listener"`
}
// cmdlineConfigUnix is the cmdline configuration object for a control service on Unix.
type cmdlineConfigUnix struct {
Service string `description:"Receptor service name to listen on" default:"control"`
Filename string `description:"Filename of local Unix socket to bind to the service"`
Permissions int `description:"Socket file permissions" default:"0600"`
TLS string `description:"Name of TLS server config for the Receptor listener"`
TCPListen string `description:"Local TCP port or host:port to bind to the control service"`
TCPTLS string `description:"Name of TLS server config for the TCP listener"`
}
// Run runs the action.
func (cfg cmdlineConfigUnix) Run() error {
if cfg.TLS != "" && cfg.TCPListen != "" && cfg.TCPTLS == "" {
logger.Warning("Control service %s has TLS configured on the Receptor listener but not the TCP listener.", cfg.Service)
}
tlscfg, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLS)
if err != nil {
return err
}
var tcptls *tls.Config
if cfg.TCPListen != "" {
tcptls, err = netceptor.MainInstance.GetServerTLSConfig(cfg.TCPTLS)
if err != nil {
return err
}
}
err = MainInstance.RunControlSvc(context.Background(), cfg.Service, tlscfg, cfg.Filename,
os.FileMode(cfg.Permissions), cfg.TCPListen, tcptls)
if err != nil {
return err
}
return nil
}
// Run runs the action.
func (cfg cmdlineConfigWindows) Run() error {
return cmdlineConfigUnix{
Service: cfg.Service,
TLS: cfg.TLS,
TCPListen: cfg.TCPListen,
TCPTLS: cfg.TCPTLS,
}.Run()
}
func init() {
if runtime.GOOS == "windows" {
cmdline.RegisterConfigTypeForApp("receptor-control-service",
"control-service", "Run a control service", cmdlineConfigWindows{})
} else {
cmdline.RegisterConfigTypeForApp("receptor-control-service",
"control-service", "Run a control service", cmdlineConfigUnix{})
}
}
type Controllers struct {
UnixControl []UnixControl `mapstructure:"unix"`
TCPControl []TCPControl `mapstructure:"tcp"`
}
func (c Controllers) Setup(ctx context.Context, cv *Server) error {
for _, c := range c.UnixControl {
if err := c.setup(ctx, cv); err != nil {
return fmt.Errorf("could not setup unix controller from controllers config: %w", err)
}
}
for _, c := range c.TCPControl {
if err := c.setup(ctx, cv); err != nil {
return fmt.Errorf("could not setup tcp controller from controllers config: %w", err)
}
}
return nil
}
// UnixControl exposes a receptor control socket via unix socket.
type UnixControl struct {
// Receptor service name to listen on.
Service *string `mapstructure:"service"`
// Filename of local Unix socket to bind to the service.
File string `mapstructure:"file"`
// Socket file permissions.
Permissions *int `mapstructure:"permissions"`
// TLS config to use for the transport within receptor.
// Leave empty for no TLS.
MeshTLS *tls.ServerConf `mapstructure:"mesh-tls"`
}
func (s *UnixControl) setup(ctx context.Context, cv *Server) error {
service := "control"
if s.Service != nil {
service = *s.Service
}
perms := 0o600
if s.Permissions != nil {
perms = *s.Permissions
}
var err error
var tlsReceptor *tls.Config
if s.MeshTLS != nil {
tlsReceptor, err = s.MeshTLS.TLSConfig()
if err != nil {
return fmt.Errorf("could not create receptor tls config for tcp control service %s: %w", service, err)
}
}
return cv.RunControlSvc(
ctx,
service,
tlsReceptor,
s.File,
os.FileMode(perms),
"",
nil,
)
}
// TCPControl exposes a receptor control socket via TCP.
type TCPControl struct {
// Receptor service name to listen on.
Service *string `mapstructure:"service"`
// TLS config to use for the transport within receptor.
// Leave empty for no TLS.
MeshTLS *tls.ServerConf `mapstructure:"mesh-tls"`
// TLS config to use for the exposed control port.
// Leave empty for no TLS.
TCPTLS *tls.ServerConf `mapstructure:"tcp-tls"`
// Address to listen on ("host:port" from net package).
Address string `mapstructure:"address"`
}
func (s *TCPControl) setup(ctx context.Context, cv *Server) error {
service := "control"
if s.Service != nil {
service = *s.Service
}
var err error
var tlsReceptor *tls.Config
var tcptls *tls.Config
if s.MeshTLS != nil {
tlsReceptor, err = s.MeshTLS.TLSConfig()
if err != nil {
return fmt.Errorf("could not create receptor tls config for tcp control service %s: %w", service, err)
}
}
if s.TCPTLS != nil {
tcptls, err = s.TCPTLS.TLSConfig()
if err != nil {
return fmt.Errorf("could not create tcp tls config for tcp control service %s: %w", service, err)
}
}
return cv.RunControlSvc(
ctx,
service,
tlsReceptor,
"",
0,
s.Address,
tcptls,
)
}
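// Illustrative YAML shape for these mapstructure tags (the top-level
// "controllers" key is an assumption, not confirmed by this file):
//
// controllers:
//   unix:
//     - service: control
//       file: /var/run/receptor.sock
//       permissions: 0600
//   tcp:
//     - service: control
//       address: 0.0.0.0:8999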
instruction_decoder.rs
use Instruction;
use InstructionGroup;
use Operand;
pub struct InstructionDecoder;
impl InstructionDecoder {
#[inline(always)]
pub fn decode(code: u16) -> Instruction {
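// The 16-bit instruction word splits into four nibbles: c1 (bits 12-15,
// major opcode), n (bits 8-11), m (bits 4-7) and c4 (bits 0-3); the low
// byte i doubles as an immediate or displacement. For example,
// decode(0x300C) yields Instruction::Add(RegisterOperand(0), RegisterOperand(0)).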
let mut inst_code = code;
let i = (inst_code & 0xFF) as u8;
let c4 = (inst_code & 0xF) as u8;
inst_code >>= 4;
let m = (inst_code & 0xF) as u8;
inst_code >>= 4;
let n = (inst_code & 0xF) as u8;
inst_code >>= 4;
let c1 = (inst_code & 0xF) as u8;
let op_n = Operand::RegisterOperand(n);
let op_m = Operand::RegisterOperand(m);
let imm = Operand::ImmediateOperand(i);
let disp = Operand::DisplacementOperand(i);
match c1 {
0x0 => match c4 {
0x2 => match m {
0x1 => Instruction::StcGbr(op_n),
x if x >= 8 => Instruction::StcBanked(op_n, x - 8),
_ => Instruction::Unknown,
},
0x3 => match m {
0x0 => Instruction::Bsrf(op_n),
0x2 => Instruction::Braf(op_n),
0x8 => Instruction::Pref(op_n),
0xC => Instruction::MovCA(op_n),
_ => Instruction::Unknown
},
0x4 => Instruction::MovDataStoreR0B(op_n, op_m),
0x5 => Instruction::MovDataStoreR0W(op_n, op_m),
0x6 => Instruction::MovDataStoreR0L(op_n, op_m),
0x7 => Instruction::MulL(op_n, op_m),
0x8 => match m {
0x0 => Instruction::Clrt,
0x1 => Instruction::Sett,
0x4 => Instruction::Clrs,
0x5 => Instruction::Sets,
_ => Instruction::Unknown
},
0x9 => match m {
0x0 => Instruction::Nop,
0x1 => Instruction::Div0u,
0x2 => Instruction::MovT(op_n),
_ => Instruction::Unknown
},
0xA => match m {
0x0 => Instruction::StsMacH(op_n),
0x1 => Instruction::StsMacL(op_n),
0x2 => Instruction::StsPr(op_n),
0xF => Instruction::StcDbr(op_n),
_ => Instruction::Unknown
},
0xB => Instruction::Rts,
0xC => Instruction::MovDataLoadR0B(op_n, op_m),
0xD => Instruction::MovDataLoadR0W(op_n, op_m),
0xE => Instruction::MovDataLoadR0L(op_n, op_m),
0xF => Instruction::MacL(op_n, op_m),
_ => Instruction::Unknown
},
0x1 => Instruction::MovStructStoreL(op_n, imm),
0x2 => match c4 {
0x0 => Instruction::MovDataBStore(op_n, op_m),
0x1 => Instruction::MovDataWStore(op_n, op_m),
0x2 => Instruction::MovDataLStore(op_n, op_m),
0x4 => Instruction::MovDataBStore1(op_n, op_m),
0x5 => Instruction::MovDataWStore2(op_n, op_m),
0x6 => Instruction::MovDataLStore4(op_n, op_m),
0x7 => Instruction::Div0s(op_n, op_m),
0x8 => Instruction::Tst(op_n, op_m),
0x9 => Instruction::And(op_n, op_m),
0xA => Instruction::Xor(op_n, op_m),
0xB => Instruction::Or(op_n, op_m),
0xC => Instruction::CmpStr(op_n, op_m),
0xE => Instruction::MulUW(op_n, op_m),
0xF => Instruction::MulSW(op_n, op_m),
_ => Instruction::Unknown
},
0x3 => match c4 {
0x0 => Instruction::CmpEq(op_n, op_m),
0x2 => Instruction::CmpHs(op_n, op_m),
0x3 => Instruction::CmpGe(op_n, op_m),
0x4 => Instruction::Div1(op_n, op_m),
0x6 => Instruction::CmpHi(op_n, op_m),
0x7 => Instruction::CmpGt(op_n, op_m),
0x8 => Instruction::Sub(op_n, op_m),
0xC => Instruction::Add(op_n, op_m),
0xE => Instruction::AddWithCarry(op_n, op_m),
0xF => Instruction::AddOverflow(op_n, op_m),
_ => Instruction::Unknown
},
0x4 => match c4 {
0x0 => match m {
0x0 => Instruction::Shll(op_n),
0x1 => Instruction::Dt(op_n),
_ => Instruction::Unknown,
},
0x1 => match m {
0x0 => Instruction::Shlr(op_n),
0x1 => Instruction::CmpPz(op_n),
0x2 => Instruction::Shar(op_n),
_ => Instruction::Unknown
},
0x2 => match m {
0x0 => Instruction::StsLMacH(op_n),
0x1 => Instruction::StsLMacL(op_n),
0x2 => Instruction::StsLPr(op_n),
_ => Instruction::Unknown
},
0x4 => match m {
0x0 => Instruction::Rotl(op_n),
0x2 => Instruction::RotCl(op_n),
_ => Instruction::Unknown
},
0x5 => match m {
0x0 => Instruction::Rotr(op_n),
0x1 => Instruction::CmpPl(op_n),
0x2 => Instruction::RotCr(op_n),
_ => Instruction::Unknown
},
0x6 => match m {
0x0 => Instruction::LdsLMacl(op_n),
0x1 => Instruction::LdsLMach(op_n),
0x2 => Instruction::LdsLPr(op_n),
0x5 => Instruction::LdsFpulL(op_n),
0x6 => Instruction::LdsFpscrL(op_n),
_ => Instruction::Unknown
},
0x7 => match m {
0x0 => Instruction::LdcLSr(op_n),
0x1 => Instruction::LdcLGbr(op_n),
0x2 => Instruction::LdcLVbr(op_n),
0x3 => Instruction::LdcLSsr(op_n),
0x4 => Instruction::LdcLSpc(op_n),
_ => Instruction::Unknown
},
0x8 => match m {
0x0 => Instruction::Shll2(op_n),
0x1 => Instruction::Shll8(op_n),
0x2 => Instruction::Shll16(op_n),
_ => Instruction::Unknown
},
0x9 => match m {
0x0 => Instruction::Shlr2(op_n),
0x1 => Instruction::Shlr8(op_n),
0x2 => Instruction::Shlr16(op_n),
_ => Instruction::Unknown
},
0xA => match m {
0x2 => Instruction::LdsPr(op_n),
0x6 => Instruction::LdsFpscr(op_n),
0xF => Instruction::LdcDbr(op_n),
_ => Instruction::Unknown
},
0xB => match m {
0x0 => Instruction::Jsr(op_n),
0x1 => Instruction::Tas(op_n),
0x2 => Instruction::Jmp(op_n),
_ => Instruction::Unknown
},
0xE => Instruction::LdcSr(op_n),
_ => Instruction::Unknown
},
0x5 => Instruction::MovStructLoadL(op_n, imm),
0x6 => match c4 {
0x0 => Instruction::MovDataSignBLoad(op_n, op_m),
0x1 => Instruction::MovDataSignWLoad(op_n, op_m),
0x2 => Instruction::MovDataSignLLoad(op_n, op_m),
0x3 => Instruction::MovData(op_n, op_m),
0x4 => Instruction::MovDataSignBLoad1(op_n, op_m),
0x5 => Instruction::MovDataSignWLoad2(op_n, op_m),
0x6 => Instruction::MovDataSignLLoad4(op_n, op_m),
0x7 => Instruction::Not(op_n, op_m),
0x8 => Instruction::SwapB(op_n, op_m),
0x9 => Instruction::SwapW(op_n, op_m),
0xB => Instruction::ExtUB(op_n, op_m),
0xC => Instruction::ExtUW(op_n, op_m),
0xE => Instruction::ExtSB(op_n, op_m),
0xF => Instruction::ExtSW(op_n, op_m),
_ => Instruction::Unknown
},
0x7 => Instruction::AddConstant(op_n, imm),
0x8 => match n {
0x0 => Instruction::MovStructStoreB(op_m, Operand::DisplacementOperand(c4 as u8)),
0x1 => Instruction::MovStructStoreW(op_m, Operand::DisplacementOperand(c4 as u8)),
0x4 => Instruction::MovStructLoadB(op_m, Operand::DisplacementOperand(c4 as u8)),
0x5 => Instruction::MovStructLoadW(op_m, Operand::DisplacementOperand(c4 as u8)),
0x8 => Instruction::CmpEqImm(imm),
0x9 => Instruction::Bt(disp),
0xB => Instruction::Bf(disp),
0xD => Instruction::Bts(disp),
0xF => Instruction::Bfs(disp),
_ => Instruction::Unknown
},
0x9 => Instruction::MovConstantLoadW(op_n, disp),
0xA => Instruction::Bra(op_n, disp),
0xB => Instruction::Bsr(op_n, disp),
0xC => match n {
0x0 => Instruction::MovGlobalStoreB(disp),
0x1 => Instruction::MovGlobalStoreW(disp),
0x2 => Instruction::MovGlobalStoreL(disp),
0x4 => Instruction::MovGlobalLoadB(disp),
0x5 => Instruction::MovGlobalLoadW(disp),
0x6 => Instruction::MovGlobalLoadL(disp),
0x7 => Instruction::MovA(disp),
0x8 => Instruction::TstImm(imm),
0x9 => Instruction::AndImm(imm),
0xA => Instruction::XorImm(imm),
0xB => Instruction::OrImm(imm),
0xC => Instruction::TstB(imm),
0xD => Instruction::AndB(imm),
0xE => Instruction::XorB(imm),
0xF => Instruction::OrB(imm),
_ => Instruction::Unknown,
},
0xD => Instruction::MovConstantLoadL(op_n, disp),
0xE => Instruction::MovConstantSign(op_n, imm),
0xF => match c4 {
0x0 => Instruction::FAdd(op_n, op_m),
0x9 => match n % 2 {
0x0 => Instruction::FMovLoadD8(op_n, op_m),
_ => Instruction::FMovLoadS4(op_n, op_m),
},
0xB => match m % 2 {
0x0 => Instruction::FMovStoreD8(op_n, Operand::RegisterOperand(m >> 1)),
_ => Instruction::FMovStoreS4(op_n, op_m)
},
0xC => Instruction::FMov(op_n, op_m),
0xD => match m {
0xF => Instruction::Frchg,
_ => Instruction::Unknown
},
_ => Instruction::Unknown
},
_ => Instruction::Unknown
}
}
pub fn instruction_group(inst: Instruction) -> InstructionGroup {
match inst {
Instruction::Clrt => InstructionGroup::MT,
Instruction::CmpEq(_, _) => InstructionGroup::MT,
Instruction::Add(_, _) => InstructionGroup::EX,
Instruction::AddConstant(_, _) => InstructionGroup::EX,
Instruction::AddWithCarry(_, _) => InstructionGroup::EX,
Instruction::AddOverflow(_, _) => InstructionGroup::EX,
Instruction::And(_, _) => InstructionGroup::EX,
Instruction::Bf(_) => InstructionGroup::BR,
Instruction::Bfs(_) => InstructionGroup::BR,
Instruction::Bt(_) => InstructionGroup::BR,
Instruction::Bts(_) => InstructionGroup::BR,
Instruction::Bra(_, _) => InstructionGroup::BR,
Instruction::Bsr(_, _) => InstructionGroup::BR,
_ => InstructionGroup::Unknown
}
}
pub fn parallelizable(a: InstructionGroup, b: InstructionGroup) -> bool {
// Instructions in the same group cannot be dual-issued, except that two
// MT-group instructions may pair with each other.
if a == b {
return a == InstructionGroup::MT;
}
match (a, b) {
(InstructionGroup::CO, _) | (_, InstructionGroup::CO) => false,
_ => true
}
}
pub fn alters_pc(inst: Instruction) -> bool {
match inst {
Instruction::Bf(_) => true,
Instruction::Bt(_) => true,
Instruction::Jmp(_) => true,
_ => false
}
}
}
main.go
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package main
import (
"log"
"os"
"time"
"github.com/moov-io/wire"
)
func main() {
file := wire.NewFile()
fwm := wire.FEDWireMessage{}
// Mandatory Fields
ss := wire.NewSenderSupplied()
ss.UserRequestCorrelation = "User Req"
ss.MessageDuplicationCode = wire.MessageDuplicationOriginal
fwm.SenderSupplied = ss
tst := wire.NewTypeSubType()
tst.TypeCode = wire.SettlementTransfer
tst.SubTypeCode = wire.BasicFundsTransfer
fwm.TypeSubType = tst
// InputMessageAccountabilityData
imad := wire.NewInputMessageAccountabilityData()
imad.InputCycleDate = time.Now().Format("20060102")
imad.InputSource = "Source08"
imad.InputSequenceNumber = "000001"
fwm.InputMessageAccountabilityData = imad
// Amount
amt := wire.NewAmount()
amt.Amount = "000001234567"
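// Amount is 12 numeric characters with an implied decimal point, so
// "000001234567" represents $12,345.67.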
fwm.Amount = amt
// SenderDepositoryInstitution
sdi := wire.NewSenderDepositoryInstitution()
sdi.SenderABANumber = "121042882"
sdi.SenderShortName = "Wells Fargo NA"
fwm.SenderDepositoryInstitution = sdi
rdi := wire.NewReceiverDepositoryInstitution()
rdi.ReceiverABANumber = "231380104"
rdi.ReceiverShortName = "Citadel"
fwm.ReceiverDepositoryInstitution = rdi
bfc := wire.NewBusinessFunctionCode()
bfc.BusinessFunctionCode = wire.FEDFundsReturned
bfc.TransactionTypeCode = " "
fwm.BusinessFunctionCode = bfc
// Other Transfer Information
// Sender Reference
sr := wire.NewSenderReference()
sr.SenderReference = "Sender Reference"
fwm.SenderReference = sr
// Previous Message Identifier
pmi := wire.NewPreviousMessageIdentifier()
pmi.PreviousMessageIdentifier = "Previous Message Ident"
fwm.PreviousMessageIdentifier = pmi
// Beneficiary
// Beneficiary Intermediary FI
bifi := wire.NewBeneficiaryIntermediaryFI()
bifi.FinancialInstitution.IdentificationCode = wire.DemandDepositAccountNumber
bifi.FinancialInstitution.Identifier = "123456789"
bifi.FinancialInstitution.Name = "FI Name"
bifi.FinancialInstitution.Address.AddressLineOne = "Address One"
bifi.FinancialInstitution.Address.AddressLineTwo = "Address Two"
bifi.FinancialInstitution.Address.AddressLineThree = "Address Three"
fwm.BeneficiaryIntermediaryFI = bifi
// Beneficiary FI
bfi := wire.NewBeneficiaryFI()
bfi.FinancialInstitution.IdentificationCode = wire.DemandDepositAccountNumber
bfi.FinancialInstitution.Identifier = "123456789"
bfi.FinancialInstitution.Name = "FI Name"
bfi.FinancialInstitution.Address.AddressLineOne = "Address One"
bfi.FinancialInstitution.Address.AddressLineTwo = "Address Two"
bfi.FinancialInstitution.Address.AddressLineThree = "Address Three"
fwm.BeneficiaryFI = bfi
// Beneficiary
ben := wire.NewBeneficiary()
ben.Personal.IdentificationCode = wire.DriversLicenseNumber
ben.Personal.Identifier = "1234"
ben.Personal.Name = "Name"
ben.Personal.Address.AddressLineOne = "Address One"
ben.Personal.Address.AddressLineTwo = "Address Two"
ben.Personal.Address.AddressLineThree = "Address Three"
fwm.Beneficiary = ben
// Beneficiary Reference
br := wire.NewBeneficiaryReference()
br.BeneficiaryReference = "Reference"
fwm.BeneficiaryReference = br
// Originator
o := wire.NewOriginator()
o.Personal.IdentificationCode = wire.PassportNumber
o.Personal.Identifier = "1234"
o.Personal.Name = "Name"
o.Personal.Address.AddressLineOne = "Address One"
o.Personal.Address.AddressLineTwo = "Address Two"
o.Personal.Address.AddressLineThree = "Address Three"
fwm.Originator = o
// Originator FI
ofi := wire.NewOriginatorFI()
ofi.FinancialInstitution.IdentificationCode = wire.DemandDepositAccountNumber
ofi.FinancialInstitution.Identifier = "123456789"
ofi.FinancialInstitution.Name = "FI Name"
ofi.FinancialInstitution.Address.AddressLineOne = "Address One"
ofi.FinancialInstitution.Address.AddressLineTwo = "Address Two"
ofi.FinancialInstitution.Address.AddressLineThree = "Address Three"
fwm.OriginatorFI = ofi
// Instructing FI
ifi := wire.NewInstructingFI()
ifi.FinancialInstitution.IdentificationCode = wire.DemandDepositAccountNumber
ifi.FinancialInstitution.Identifier = "123456789"
ifi.FinancialInstitution.Name = "FI Name"
ifi.FinancialInstitution.Address.AddressLineOne = "Address One"
ifi.FinancialInstitution.Address.AddressLineTwo = "Address Two"
ifi.FinancialInstitution.Address.AddressLineThree = "Address Three"
fwm.InstructingFI = ifi
// Originator To Beneficiary
ob := wire.NewOriginatorToBeneficiary()
ob.LineOne = "LineOne"
ob.LineTwo = "LineTwo"
ob.LineThree = "LineThree"
ob.LineFour = "LineFour"
fwm.OriginatorToBeneficiary = ob
// FI to FI
// FIReceiverFI
firfi := wire.NewFIReceiverFI()
firfi.FIToFI.LineOne = "FIToFI Line One"
firfi.FIToFI.LineOne = "FIToFI Line Two"
firfi.FIToFI.LineOne = "FIToFI Line Three"
firfi.FIToFI.LineOne = "FIToFI Line Four"
firfi.FIToFI.LineOne = "FIToFI Line Five"
firfi.FIToFI.LineOne = "FIToFI Line Six"
fwm.FIReceiverFI = firfi
// FIIntermediaryFI
fiifi := wire.NewFIIntermediaryFI()
fiifi.FIToFI.LineOne = "FIIntermediaryFI Line One"
fiifi.FIToFI.LineOne = "FIIntermediaryFI Line Two"
fiifi.FIToFI.LineOne = "FIIntermediaryFI Line Three"
fiifi.FIToFI.LineOne = "FIIntermediaryFI Line Four"
fiifi.FIToFI.LineOne = "FIIntermediaryFI Line Five"
fiifi.FIToFI.LineOne = "FIIntermediaryFI Line Six"
fwm.FIIntermediaryFI = fiifi
// FIIntermediaryFIAdvice
fiifia := wire.NewFIIntermediaryFIAdvice()
fiifia.Advice.AdviceCode = wire.AdviceCodeLetter
fiifia.Advice.LineOne = "FIInterFIAdvice Line One"
fiifia.Advice.LineTwo = "FIInterFIAdvice Line Two"
fiifia.Advice.LineThree = "FIInterFIAdvice Line Three"
fiifia.Advice.LineFour = "FIInterFIAdvice Line Four"
fiifia.Advice.LineFive = "FIInterFIAdvice Line Five"
fiifia.Advice.LineSix = "FIInterFIAdvice Line Six"
fwm.FIIntermediaryFIAdvice = fiifia
// FIBeneficiaryFI
fibfi := wire.NewFIBeneficiaryFI()
fibfi.FIToFI.LineOne = "FIBenFI Line One"
fibfi.FIToFI.LineTwo = "FIBenFI Line Two"
fibfi.FIToFI.LineThree = "FIBenFI Line Three"
fibfi.FIToFI.LineFour = "FIBenFI Line Four"
fibfi.FIToFI.LineFive = "FIBenFI Line Five"
fibfi.FIToFI.LineSix = "FIBenFI Line Six"
fwm.FIBeneficiaryFI = fibfi
// FIBeneficiaryFIAdvice
fibfia := wire.NewFIBeneficiaryFIAdvice()
fibfia.Advice.AdviceCode = wire.AdviceCodeTelex
fibfia.Advice.LineOne = "FIBenFIAdvice Line One"
fibfia.Advice.LineTwo = "FIBenFIAdvice Line Two"
fibfia.Advice.LineThree = "FIBenFIAdvice Line Three"
fibfia.Advice.LineFour = "FIBenFIAdvice Line Four"
fibfia.Advice.LineFive = "FIBenFIAdvice Line Five"
fibfia.Advice.LineSix = "FIBenFIAdvice Line Six"
fwm.FIBeneficiaryFIAdvice = fibfia
// FIBeneficiary
fib := wire.NewFIBeneficiary()
fib.FIToFI.LineOne = "FIBen Line One"
fib.FIToFI.LineTwo = "FIBen Line Two"
fib.FIToFI.LineThree = "FIBen Line Three"
fib.FIToFI.LineFour = "FIBen Line Four"
fib.FIToFI.LineFive = "FIBen Line Five"
fib.FIToFI.LineSix = "FIBen Line Six"
fwm.FIBeneficiary = fib
// FIBeneficiaryAdvice
fiba := wire.NewFIBeneficiaryAdvice()
fiba.Advice.AdviceCode = wire.AdviceCodeLetter
fiba.Advice.LineOne = "FIBenAdvice Line One"
fiba.Advice.LineTwo = "FIBenAdvice Line Two"
fiba.Advice.LineThree = "FIBenAdvice Line Three"
fiba.Advice.LineFour = "FIBenAdvice Line Four"
fiba.Advice.LineFive = "FIBenAdvice Line Five"
fiba.Advice.LineSix = "FIBenAdvice Line Six"
fwm.FIBeneficiaryAdvice = fiba
// FIPaymentMethodToBeneficiary
pm := wire.NewFIPaymentMethodToBeneficiary()
pm.PaymentMethod = "CHECK"
pm.AdditionalInformation = "Additional Information"
fwm.FIPaymentMethodToBeneficiary = pm
// FIAdditionalFIToFI
fifi := wire.NewFIAdditionalFIToFI()
fifi.AdditionalFIToFI.LineOne = "FIAddFI Line One"
fifi.AdditionalFIToFI.LineTwo = "FIAddFI Line Two"
fifi.AdditionalFIToFI.LineThree = "FIAddFI Line Three"
fifi.AdditionalFIToFI.LineFour = "FIAddFI Line Four"
fifi.AdditionalFIToFI.LineFive = "FIAddFI Line Five"
fifi.AdditionalFIToFI.LineSix = "FIAddFI Line Six"
fwm.FIAdditionalFIToFI = fifi
file.AddFEDWireMessage(fwm)
if err := file.Create(); err != nil {
log.Fatalf("Could not create FEDWireMessage: %s\n", err)
}
if err := file.Validate(); err != nil {
log.Fatalf("Could not validate FEDWireMessage: %s\n", err)
}
w := wire.NewWriter(os.Stdout)
if err := w.Write(file); err != nil {
log.Fatalf("Unexpected error: %s\n", err)
}
w.Flush()
}
admin.py
from django.contrib import admin
from django import forms
from .models import *
from django_better_admin_arrayfield.admin.mixins import DynamicArrayMixin
import sdap.tools.forms as tool_forms
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from django.apps import apps
class ExpressionStudyAdmin(admin.ModelAdmin, DynamicArrayMixin):
fieldsets = [
(None, {'fields': ['database','article', 'pmid', 'status', 'ome', 'experimental_design', 'topics', 'tissues', 'sex',
'dev_stage', 'age', 'antibody', 'mutant', 'cell_sorted', 'keywords', 'samples_count', 'read_groups', 'edit_groups',
]
}
),
]
class ExpressionDataAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name', 'file','gene_type','gene_number', 'technology', 'species' ,'cell_number', 'study'
]
}
),
]
list_display = ['name', 'class_name']
class GeneAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['gene_id','tax_id','symbol','synonyms','description','homolog_id','ensemble_id'
]
}
),
]
list_display = ['symbol', 'gene_id']
search_fields = ['symbol']
class GeneListAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['name','created_by','species','genes'
]
}
),
]
autocomplete_fields = ['genes']
admin.site.register(ExpressionStudy, ExpressionStudyAdmin)
admin.site.register(ExpressionData, ExpressionDataAdmin)
admin.site.register(GeneList, GeneListAdmin)
admin.site.register(Gene, GeneAdmin)
admin.site.register(Database)
train.py
# -*- coding:utf-8 -*-
# Author: Xue Yang <[email protected]>
#
# License: Apache-2.0 license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
sys.path.append("../../")
from tools.train_base import Train
from libs.configs import cfgs
from libs.models.detectors.r3det_gwd import build_whole_network
from libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
class TrainR3DetGWD(Train):
def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \
gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)
def main(self):
with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
global_step = slim.get_or_create_global_step()
lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)
tf.summary.scalar('lr', lr)
optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
else:
shortside_len = cfgs.IMG_SHORT_SIDE_LEN
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
batch_size=cfgs.BATCH_SIZE * num_gpu,
shortside_len=shortside_len,
is_training=True)
# data processing
inputs_list = []
for i in range(num_gpu):
img = tf.expand_dims(img_batch[i], axis=0)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img = img / tf.constant([cfgs.PIXEL_STD])
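# backward_convert turns horizontal ground-truth boxes into rotated
# [x_c, y_c, w, h, theta, label] form, hence the [-1, 6] reshape below.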
gtboxes_and_label_r = tf.py_func(backward_convert,
inp=[gtboxes_and_label_batch[i]],
Tout=tf.float32)
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
num_objects = num_objects_batch[i]
num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)
img_h = img_h_batch[i]
img_w = img_w_batch[i]
inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])
tower_grads = []
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)
with tf.variable_scope(tf.get_variable_scope()):
for i in range(num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
with slim.arg_scope(
[slim.model_variable, slim.variable],
device='/device:CPU:0'):
with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
slim.conv2d_transpose, slim.separable_conv2d,
slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
biases_initializer=tf.constant_initializer(0.0)):
gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label,
inp=[inputs_list[i][1],
inputs_list[i][2],
inputs_list[i][3]],
Tout=[tf.float32, tf.float32])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
img = inputs_list[i][0]
img_shape = inputs_list[i][-2:]
img = tf.image.crop_to_bounding_box(image=img,
offset_height=0,
offset_width=0,
target_height=tf.cast(img_shape[0], tf.int32),
target_width=tf.cast(img_shape[1], tf.int32))
outputs = r3det_gwd.build_whole_detection_network(input_img_batch=img,
gtboxes_batch_h=gtboxes_and_label_h,
gtboxes_batch_r=gtboxes_and_label_r,
gpu_id=i)
gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img,
boxes=gtboxes_and_label_h[
:, :-1],
labels=gtboxes_and_label_h[
:, -1],
method=0)
gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(img_batch=img,
boxes=gtboxes_and_label_r[
:, :-1],
labels=gtboxes_and_label_r[
:, -1],
method=1)
tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)
tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)
if cfgs.ADD_BOX_IN_TENSORBOARD:
detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
img_batch=img,
boxes=outputs[0],
scores=outputs[1],
labels=outputs[2],
method=1)
tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)
loss_dict = outputs[-1]
total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)
if i == num_gpu - 1:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(total_losses)
if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
tower_grads.append(grads)
self.log_printer(r3det_gwd, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)
if __name__ == '__main__':
trainer = TrainR3DetGWD(cfgs)
trainer.main()
secret.py
import os
from google.cloud import secretmanager
class Secret:
def __init__(self):
# Create the Secret Manager client.
self.client = secretmanager.SecretManagerServiceClient()
self.project_id = os.getenv('GOOGLE_CLOUD_PROJECT')
def get_secret(self, secret_id):
# Build the parent name from the project.
name = f"projects/{self.project_id}/secrets/{secret_id}/versions/latest"
# Access the secret version.
response = self.client.access_secret_version(request={"name": name})
# Return the secret payload.
#
# WARNING: Do not print the secret in a production environment - this
# snippet is showing how to access the secret material.
return response.payload.data.decode("UTF-8")
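# Usage sketch (assumes GOOGLE_CLOUD_PROJECT is set and the caller holds
# roles/secretmanager.secretAccessor; "db-password" is an illustrative id):
#
# secret = Secret()
# password = secret.get_secret("db-password")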
pinyinsplit.py
from pygtrie import CharTrie
import copy
"""
Split a Chinese Pinyin phrase into a list of possible permutations of Pinyin words.
For example,
>>> from pinyinsplit import PinyinSplit
>>> pys = PinyinSplit()
>>> pys.split('XiangGangDaXue')
[['Xiang', 'Gang', 'Da', 'Xue'], ['Xiang', 'Gang', 'Da', 'Xu', 'e'], ['Xi', 'ang', 'Gang', 'Da', 'Xue'], ['Xi', 'ang', 'Gang', 'Da', 'Xu', 'e']]
"""
class PinyinSplit:
"""Split a Chinese Pinyin phrase into a list of possible permutations of Pinyin words.
It returns a list of all possible permutations of valid Pinyin words.
If the Pinyin phrase cannot be exhaustively split into valid Pinyin words, an empty list will be returned.
>>> from pinyinsplit import PinyinSplit
>>> pys = PinyinSplit()
>>> pys.split('shediaoyingxiongchuan')
[['she', 'diao', 'ying', 'xiong', 'chuan'], ['she', 'diao', 'ying', 'xiong', 'chu', 'an'], ['she', 'di', 'ao', 'ying', 'xiong', 'chuan'], ['she', 'di', 'ao', 'ying', 'xiong', 'chu', 'an']]
>>> pys.split('shediaoyingxiongchuanxyz')
[]
"""
pylist = [
'a', 'ai', 'an', 'ang', 'ao',
'ba', 'bai', 'ban', 'bang', 'bao', 'bei', 'ben', 'beng',
'bi', 'bian', 'biang', 'biao', 'bie', 'bin', 'bing', 'bo', 'bu',
'ca', 'cai', 'can', 'cang', 'cao', 'ce', 'cen', 'ceng',
'cha', 'chai', 'chan', 'chang', 'chao', 'che', 'chen', 'cheng',
'chi', 'chong', 'chou', 'chu', 'chua', 'chuai', 'chuan', 'chuang', 'chui', 'chun', 'chuo',
'ci', 'cong', 'cou', 'cu', 'cuan', 'cui', 'cun', 'cuo',
'da', 'dai', 'dan', 'dang', 'dao', 'de', 'dei', 'den', 'deng',
'di', 'dia', 'dian', 'diang', 'diao', 'die', 'ding', 'diu',
'dong', 'dou', 'du', 'duan', 'dui', 'dun', 'duo',
'e', 'ei', 'en', 'eng', 'er',
'fa', 'fan', 'fang', 'fei', 'fen', 'feng', 'fiao',
'fo', 'fou', 'fu', 'ga', 'gai', 'gan', 'gang', 'gao',
'ge', 'gei', 'gen', 'geng', 'gong', 'gou',
'gu', 'gua', 'guai', 'guan', 'guang', 'gui', 'gun', 'guo',
'ha', 'hai', 'han', 'hang', 'hao', 'he', 'hei', 'hen', 'heng',
'hong', 'hou', 'hu', 'hua', 'huai', 'huan', 'huang', 'hui', 'hun', 'huo',
'ji', 'jia', 'jian', 'jiang', 'jiao', 'jie', 'jin', 'jing', 'jiong', 'jiu', 'ju', 'juan', 'jue', 'jun',
'ka', 'kai', 'kan', 'kang', 'kao', 'ke', 'kei', 'ken', 'keng',
'kong', 'kou', 'ku', 'kua', 'kuai', 'kuan', 'kuang', 'kui', 'kun', 'kuo',
'la', 'lai', 'lan', 'lang', 'lao', 'le', 'lei', 'leng',
'li', 'lia', 'lian', 'liang', 'liao', 'lie', 'lin', 'ling', 'liu', 'long', 'lou',
'lu', 'luan', 'lue', 'lun', 'luo', 'lv', 'lve', 'lvn', 'lü', 'lüe', 'lün',
'ma', 'mai', 'man', 'mang', 'mao', 'me', 'mei', 'men', 'meng',
'mi', 'mian', 'miao', 'mie', 'min', 'ming', 'miu', 'mo', 'mou', 'mu',
'na', 'nai', 'nan', 'nang', 'nao', 'ne', 'nei', 'nen', 'neng',
'ni', 'nia', 'nian', 'niang', 'niao', 'nie', 'nin', 'ning', 'niu',
'nong', 'nou', 'nu', 'nuan', 'nue', 'nun', 'nuo', 'nv', 'nve', 'nü', 'nüe', 'ou',
'pa', 'pai', 'pan', 'pang', 'pao', 'pei', 'pen', 'peng',
'pi', 'pian', 'piao', 'pie', 'pin', 'ping', 'po', 'pou', 'pu',
'qi', 'qia', 'qian', 'qiang', 'qiao', 'qie',
'qin', 'qing', 'qiong', 'qiu', 'qu', 'quan', 'que', 'qun',
'ran', 'rang', 'rao', 're', 'ren', 'reng', 'ri', 'rong', 'rou',
'ru', 'rua', 'ruan', 'rui', 'run', 'ruo',
'sa', 'sai', 'san', 'sang', 'sao', 'se', 'sei', 'sen', 'seng',
'sha', 'shai', 'shan', 'shang', 'shao', 'she', 'shei', 'shen', 'sheng', 'shi',
'shong', 'shou', 'shu', 'shua', 'shuai', 'shuan', 'shuang', 'shui', 'shun', 'shuo',
'si', 'song', 'sou', 'su', 'suan', 'sui', 'sun', 'suo',
'ta', 'tai', 'tan', 'tang', 'tao', 'te', 'tei', 'teng',
'ti', 'tian', 'tiao', 'tie', 'ting', 'tong', 'tou',
'tu', 'tuan', 'tui', 'tun', 'tuo',
'wa', 'wai', 'wan', 'wang', 'wei', 'wen', 'weng', 'wo', 'wu',
'xi', 'xia', 'xian', 'xiang', 'xiao', 'xie', 'xin', 'xing', 'xiong', 'xiu', 'xu', 'xuan', 'xue', 'xun',
'ya', 'yai', 'yan', 'yang', 'yao', 'ye', 'yi', 'yin', 'ying',
'yo', 'yong', 'you', 'yu', 'yuan', 'yue', 'yun',
'za', 'zai', 'zan', 'zang', 'zao', 'ze', 'zei', 'zen', 'zeng',
'zha', 'zhai', 'zhan', 'zhang', 'zhao', 'zhe', 'zhei', 'zhen', 'zheng',
'zhi', 'zhong', 'zhou', 'zhu', 'zhua', 'zhuai', 'zhuan', 'zhuang', 'zhui', 'zhun', 'zhuo',
'zi', 'zong', 'zou', 'zu', 'zuan', 'zui', 'zun', 'zuo', 'ê'
]
def __init__(self):
self.trie = CharTrie()
for py in self.pylist:
self.trie[py] = len(py)
def split(self, phrase):
phrase_lc = phrase.lower()
split_list = []
results = []
if phrase:
split_list.append((phrase, phrase_lc, []))
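# Depth-first search over partial splits: each stack entry holds the
# unconsumed text (original case), its lowercase form for trie lookups,
# and the Pinyin words matched so far.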
while split_list:
pair = split_list.pop()
phrase = pair[0]
phrase_lc = pair[1]
words = pair[2]
matches = self.trie.prefixes(phrase_lc)
for match in matches:
n = match[1]
word = phrase[:n]
tail = phrase[n:]
tail_lc = phrase_lc[n:]
words_copy = copy.deepcopy(words)
words_copy.append(word)
if tail:
split_list.append((tail, tail_lc, words_copy))
else:
results.append(words_copy)
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
fifo.go
package fifo
import (
"database/sql"
"errors"
"strconv"

"github.com/cxjava/go-engine/loggo"
// NewFIFO opens a "mysql" DSN, so a MySQL driver must be registered
// alongside sqlite3; go-sql-driver/mysql is assumed here.
_ "github.com/go-sql-driver/mysql"
_ "github.com/mattn/go-sqlite3"
)
type FiFo struct {
name string
max int
db *sql.DB
insertJobStmt *sql.Stmt
getJobStmt *sql.Stmt
deleteJobStmt *sql.Stmt
sizeDoneStmt *sql.Stmt
}
func NewFIFO(dsn string, conn int, name string) (*FiFo, error) {
f := &FiFo{name: name}
gdb, err := sql.Open("mysql", dsn)
if err != nil {
loggo.Error("open mysql fail %v", err)
return nil, err
}
err = gdb.Ping()
if err != nil {
loggo.Error("open mysql fail %v", err)
return nil, err
}
gdb.SetConnMaxLifetime(0)
gdb.SetMaxIdleConns(conn)
gdb.SetMaxOpenConns(conn)
_, err = gdb.Exec("CREATE DATABASE IF NOT EXISTS fifo")
if err != nil {
loggo.Error("CREATE DATABASE fail %v", err)
return nil, err
}
_, err = gdb.Exec("CREATE TABLE IF NOT EXISTS fifo." + name + " (" +
"id int NOT NULL AUTO_INCREMENT," + | "data text NOT NULL," +
"PRIMARY KEY (id)" +
"); ")
if err != nil {
loggo.Error("CREATE TABLE fail %v", err)
return nil, err
}
stmt, err := gdb.Prepare("insert into fifo." + name + "(data) values(?)")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.insertJobStmt = stmt
stmt, err = gdb.Prepare("select id,data from fifo." + name + " limit 0,?")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.getJobStmt = stmt
stmt, err = gdb.Prepare("delete from fifo." + name + " where id = ?")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.deleteJobStmt = stmt
stmt, err = gdb.Prepare("select count(*) from fifo." + name + "")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.sizeDoneStmt = stmt
return f, nil
}
func NewFIFOLocal(name string, max int) (*FiFo, error) {
f := &FiFo{name: name, max: max}
gdb, err := sql.Open("sqlite3", "./fifo_"+name+".db")
if err != nil {
loggo.Error("open sqlite3 Job fail %v", err)
return nil, err
}
f.db = gdb
gdb.Exec("CREATE TABLE IF NOT EXISTS [data_info](" +
"[id] INTEGER PRIMARY KEY AUTOINCREMENT," +
"[data] TEXT NOT NULL);")
stmt, err := gdb.Prepare("insert into data_info(data) values(?)")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.insertJobStmt = stmt
stmt, err = gdb.Prepare("select id,data from data_info limit 0,?")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.getJobStmt = stmt
stmt, err = gdb.Prepare("delete from data_info where id = ?")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.deleteJobStmt = stmt
stmt, err = gdb.Prepare("select count(*) from data_info")
if err != nil {
loggo.Error("Prepare sqlite3 fail %v", err)
return nil, err
}
f.sizeDoneStmt = stmt
return f, nil
}
func (f *FiFo) Write(data string) error {
if f.max > 0 && f.GetSize() >= f.max {
return errors.New("fifo max " + strconv.Itoa(f.max))
}
_, err := f.insertJobStmt.Exec(data)
if err != nil {
loggo.Info("Write fail %v", err)
return err
}
//loggo.Info("Write ok %s", data)
return nil
}
func (f *FiFo) Read(n int) ([]string, error) {
ids, datas, err := f.read(n)
if err != nil {
return nil, err
}
for _, id := range ids {
_, err = f.deleteJobStmt.Exec(id)
if err != nil {
loggo.Info("Read delete fail %v", err)
return nil, err
}
}
//loggo.Info("Read ok %d %s", id, data)
return datas, nil
}
func (f *FiFo) read(n int) ([]int, []string, error) {
var ids []int
var datas []string
rows, err := f.getJobStmt.Query(n)
if err != nil {
//loggo.Info("Read Scan fail %v", err)
return nil, nil, err
}
defer rows.Close()
for rows.Next() {
var id int
var data string
err := rows.Scan(&id, &data)
if err != nil {
loggo.Info("Scan sqlite3 fail %v", err)
return nil, nil, err
}
ids = append(ids, id)
datas = append(datas, data)
}
return ids, datas, nil
}
func (f *FiFo) GetSize() int {
var ret int
err := f.sizeDoneStmt.QueryRow().Scan(&ret)
if err != nil {
loggo.Info("GetSize fail %v", err)
return 0
}
return ret
}
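// Usage sketch (illustrative): a local sqlite3-backed queue capped at
// 1000 entries.
//
// q, err := NewFIFOLocal("jobs", 1000)
// if err != nil { /* handle error */ }
// _ = q.Write(`{"task":"demo"}`)
// items, _ := q.Read(10) // reads and deletes up to 10 oldest entries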
index.ts
/**
* @license
* Copyright Google Inc. All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.io/license
*/
import {
BuildEvent,
Builder,
BuilderConfiguration,
BuilderContext,
} from '@angular-devkit/architect';
import { WebpackDevServerBuilder } from '@angular-devkit/build-webpack';
import { Path, getSystemPath, resolve, tags, virtualFs } from '@angular-devkit/core';
import { Stats, existsSync, readFileSync } from 'fs';
import * as path from 'path';
import { Observable, throwError } from 'rxjs';
import { concatMap, map, tap } from 'rxjs/operators';
import * as url from 'url';
import * as webpack from 'webpack';
import * as WebpackDevServer from 'webpack-dev-server';
import { checkPort } from '../angular-cli-files/utilities/check-port';
import { BrowserBuilder, NormalizedBrowserBuilderSchema, getBrowserLoggingCb } from '../browser/';
import { BrowserBuilderSchema } from '../browser/schema';
import { normalizeAssetPatterns, normalizeFileReplacements, normalizeSourceMaps } from '../utils';
const opn = require('opn');
export interface DevServerBuilderOptions extends Pick<BrowserBuilderSchema,
'optimization' | 'aot' | 'sourceMap' | 'vendorSourceMap'
| 'evalSourceMap' | 'vendorChunk' | 'commonChunk' | 'poll'
| 'baseHref' | 'deployUrl' | 'progress' | 'verbose'
> {
browserTarget: string;
port: number;
host: string;
proxyConfig?: string;
ssl: boolean;
sslKey?: string;
sslCert?: string;
open: boolean;
liveReload: boolean;
publicHost?: string;
servePath?: string;
disableHostCheck: boolean;
hmr: boolean;
watch: boolean;
hmrWarning: boolean;
servePathDefaultWarning: boolean;
}
type DevServerBuilderOptionsKeys = Extract<keyof DevServerBuilderOptions, string>;
export class DevServerBuilder implements Builder<DevServerBuilderOptions> {
constructor(public context: BuilderContext) { }
run(builderConfig: BuilderConfiguration<DevServerBuilderOptions>): Observable<BuildEvent> {
const options = builderConfig.options;
const root = this.context.workspace.root;
const projectRoot = resolve(root, builderConfig.root);
const host = new virtualFs.AliasHost(this.context.host as virtualFs.Host<Stats>);
const webpackDevServerBuilder = new WebpackDevServerBuilder({ ...this.context, host });
let browserOptions: BrowserBuilderSchema;
let first = true;
let opnAddress: string;
return checkPort(options.port, options.host).pipe(
tap((port) => options.port = port),
concatMap(() => this._getBrowserOptions(options)),
tap((opts) => browserOptions = opts),
concatMap(() => normalizeFileReplacements(browserOptions.fileReplacements, host, root)),
tap(fileReplacements => browserOptions.fileReplacements = fileReplacements),
concatMap(() => normalizeAssetPatterns(
browserOptions.assets, host, root, projectRoot, builderConfig.sourceRoot)),
// Replace the assets in options with the normalized version.
tap((assetPatternObjects => browserOptions.assets = assetPatternObjects)),
tap(() => {
const normalizedOptions = normalizeSourceMaps(browserOptions.sourceMap);
// todo: remove when removing the deprecations
normalizedOptions.vendorSourceMap
= normalizedOptions.vendorSourceMap || !!browserOptions.vendorSourceMap;
browserOptions = {
...browserOptions,
...normalizedOptions,
};
}),
concatMap(() => {
const webpackConfig = this.buildWebpackConfig(
root, projectRoot, host, browserOptions as NormalizedBrowserBuilderSchema);
let webpackDevServerConfig: WebpackDevServer.Configuration;
try {
webpackDevServerConfig = this._buildServerConfig(root, options, browserOptions);
} catch (err) {
return throwError(err);
}
// Resolve public host and client address.
let clientAddress = `${options.ssl ? 'https' : 'http'}://0.0.0.0:0`;
if (options.publicHost) {
let publicHost = options.publicHost;
if (!/^\w+:\/\//.test(publicHost)) {
publicHost = `${options.ssl ? 'https' : 'http'}://${publicHost}`;
}
const clientUrl = url.parse(publicHost);
options.publicHost = clientUrl.host;
clientAddress = url.format(clientUrl);
}
// Resolve serve address.
const serverAddress = url.format({
protocol: options.ssl ? 'https' : 'http',
hostname: options.host === '0.0.0.0' ? 'localhost' : options.host,
port: options.port.toString(),
});
// Add live reload config.
if (options.liveReload) {
this._addLiveReload(options, browserOptions, webpackConfig, clientAddress);
} else if (options.hmr) {
this.context.logger.warn('Live reload is disabled. HMR option ignored.');
}
if (!options.watch) {
// There's no option to turn off file watching in webpack-dev-server, but
// we can override the file watcher instead.
webpackConfig.plugins.unshift({
// tslint:disable-next-line:no-any
apply: (compiler: any) => {
compiler.hooks.afterEnvironment.tap('angular-cli', () => {
compiler.watchFileSystem = { watch: () => { } };
});
},
});
}
if (browserOptions.optimization) {
this.context.logger.error(tags.stripIndents`
****************************************************************************************
This is a simple server for use in testing or debugging Angular applications locally.
It hasn't been reviewed for security issues.
DON'T USE IT FOR PRODUCTION!
****************************************************************************************
`);
}
this.context.logger.info(tags.oneLine`
**
Angular Live Development Server is listening on ${options.host}:${options.port},
open your browser on ${serverAddress}${webpackDevServerConfig.publicPath}
**
`);
opnAddress = serverAddress + webpackDevServerConfig.publicPath;
webpackConfig.devServer = webpackDevServerConfig;
return webpackDevServerBuilder.runWebpackDevServer(
webpackConfig, undefined, getBrowserLoggingCb(browserOptions.verbose),
);
}),
map(buildEvent => {
if (first && options.open) {
first = false;
opn(opnAddress);
}
return buildEvent;
}),
// using more than 10 operators will cause rxjs to lose the types
) as Observable<BuildEvent>;
}
buildWebpackConfig(
root: Path,
projectRoot: Path,
host: virtualFs.Host<Stats>,
browserOptions: BrowserBuilderSchema,
) {
const browserBuilder = new BrowserBuilder(this.context);
const webpackConfig = browserBuilder.buildWebpackConfig(
root, projectRoot, host, browserOptions as NormalizedBrowserBuilderSchema);
return webpackConfig;
}
private _buildServerConfig(
root: Path,
options: DevServerBuilderOptions,
browserOptions: BrowserBuilderSchema,
) {
const systemRoot = getSystemPath(root);
if (options.disableHostCheck) {
this.context.logger.warn(tags.oneLine`
WARNING: Running a server with --disable-host-check is a security risk.
See https://medium.com/webpack/webpack-dev-server-middleware-security-issues-1489d950874a
for more information.
`);
}
const servePath = this._buildServePath(options, browserOptions);
const config: WebpackDevServer.Configuration = {
host: options.host,
port: options.port,
headers: { 'Access-Control-Allow-Origin': '*' },
historyApiFallback: {
index: `${servePath}/${path.basename(browserOptions.index)}`,
disableDotRule: true,
htmlAcceptHeaders: ['text/html', 'application/xhtml+xml'],
} as WebpackDevServer.HistoryApiFallbackConfig,
stats: false,
compress: browserOptions.optimization,
watchOptions: {
poll: browserOptions.poll,
},
https: options.ssl,
overlay: {
errors: !browserOptions.optimization,
warnings: false,
},
public: options.publicHost,
disableHostCheck: options.disableHostCheck,
publicPath: servePath,
hot: options.hmr,
contentBase: false,
};
if (options.ssl) {
this._addSslConfig(systemRoot, options, config);
}
if (options.proxyConfig) {
this._addProxyConfig(systemRoot, options, config);
}
return config;
}
private _addLiveReload(
options: DevServerBuilderOptions,
browserOptions: BrowserBuilderSchema,
webpackConfig: any, // tslint:disable-line:no-any
clientAddress: string,
) {
// This allows for live reload of page when changes are made to repo.
// https://webpack.js.org/configuration/dev-server/#devserver-inline
let webpackDevServerPath;
try {
webpackDevServerPath = require.resolve('webpack-dev-server/client');
} catch {
throw new Error('The "webpack-dev-server" package could not be found.');
}
const entryPoints = [`${webpackDevServerPath}?${clientAddress}`];
if (options.hmr) {
const webpackHmrLink = 'https://webpack.js.org/guides/hot-module-replacement';
this.context.logger.warn(
tags.oneLine`NOTICE: Hot Module Replacement (HMR) is enabled for the dev server.`);
const showWarning = options.hmrWarning;
if (showWarning) {
this.context.logger.info(tags.stripIndents`
The project will still live reload when HMR is enabled,
but to take advantage of HMR additional application code is required'
(not included in an Angular CLI project by default).'
See ${webpackHmrLink}
for information on working with HMR for Webpack.`,
);
this.context.logger.warn(
tags.oneLine`To disable this warning use "hmrWarning: false" under "serve"
options in "angular.json".`,
);
}
entryPoints.push('webpack/hot/dev-server');
webpackConfig.plugins.push(new webpack.HotModuleReplacementPlugin());
if (browserOptions.extractCss) {
this.context.logger.warn(tags.oneLine`NOTICE: (HMR) does not allow for CSS hot reload
when used together with '--extract-css'.`);
}
}
if (!webpackConfig.entry.main) { webpackConfig.entry.main = []; }
webpackConfig.entry.main.unshift(...entryPoints);
}
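// Illustrative sketch (not part of the builder): with a client address of
// `http://localhost:4200`, the entry points prepended to `entry.main` above
// end up looking roughly like:
//   [
//     '<node_modules>/webpack-dev-server/client?http://localhost:4200',
//     'webpack/hot/dev-server',  // only present when --hmr is enabled
//   ]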
private _addSslConfig(
root: string,
options: DevServerBuilderOptions,
config: WebpackDevServer.Configuration,
) {
let sslKey: string | undefined = undefined;
let sslCert: string | undefined = undefined;
if (options.sslKey) {
const keyPath = path.resolve(root, options.sslKey);
if (existsSync(keyPath)) {
sslKey = readFileSync(keyPath, 'utf-8');
}
}
if (options.sslCert) {
const certPath = path.resolve(root, options.sslCert);
if (existsSync(certPath)) {
sslCert = readFileSync(certPath, 'utf-8');
}
}
config.https = true;
if (sslKey != null && sslCert != null) {
config.https = {
key: sslKey,
cert: sslCert,
};
}
}
private _addProxyConfig(
root: string,
options: DevServerBuilderOptions,
config: WebpackDevServer.Configuration,
) {
let proxyConfig = {};
const proxyPath = path.resolve(root, options.proxyConfig as string);
if (existsSync(proxyPath)) {
proxyConfig = require(proxyPath);
} else {
const message = 'Proxy config file ' + proxyPath + ' does not exist.';
throw new Error(message);
}
config.proxy = proxyConfig;
}
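// Example proxy config (a sketch; the file name and backend target are
// assumptions): a CommonJS module that `--proxy-config` can point at, e.g.
// `proxy.conf.js`:
//   module.exports = {
//     '/api': { target: 'http://localhost:3000', secure: false },
//   };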
private _buildServePath(options: DevServerBuilderOptions, browserOptions: BrowserBuilderSchema) {
let servePath = options.servePath;
if (!servePath && servePath !== '') {
const defaultServePath =
this._findDefaultServePath(browserOptions.baseHref, browserOptions.deployUrl);
const showWarning = options.servePathDefaultWarning;
if (defaultServePath == null && showWarning) {
this.context.logger.warn(tags.oneLine`
WARNING: --deploy-url and/or --base-href contain
unsupported values for ng serve. Default serve path of '/' used.
Use --serve-path to override.
`);
}
servePath = defaultServePath || '';
}
if (servePath.endsWith('/')) {
servePath = servePath.substr(0, servePath.length - 1);
}
if (!servePath.startsWith('/')) {
servePath = `/${servePath}`;
}
return servePath;
}
private _findDefaultServePath(baseHref?: string, deployUrl?: string): string | null {
if (!baseHref && !deployUrl) {
return '';
}
if (/^(\w+:)?\/\//.test(baseHref || '') || /^(\w+:)?\/\//.test(deployUrl || '')) {
// If baseHref or deployUrl is absolute, unsupported by ng serve
return null;
}
// normalize baseHref
// for ng serve the starting base is always `/` so a relative
// and root relative value are identical
const baseHrefParts = (baseHref || '')
.split('/')
.filter(part => part !== '');
if (baseHref && !baseHref.endsWith('/')) {
baseHrefParts.pop();
}
const normalizedBaseHref = baseHrefParts.length === 0 ? '/' : `/${baseHrefParts.join('/')}/`;
if (deployUrl && deployUrl[0] === '/') {
if (baseHref && baseHref[0] === '/' && normalizedBaseHref !== deployUrl) {
// If baseHref and deployUrl are root relative and not equivalent, unsupported by ng serve
return null;
}
return deployUrl;
}
// Join together baseHref and deployUrl
return `${normalizedBaseHref}${deployUrl || ''}`;
}
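// Worked examples for the two path helpers above (values are illustrative):
//   _findDefaultServePath('/', undefined)           -> '/'
//   _findDefaultServePath('/base/', 'deploy/')      -> '/base/deploy/'
//   _findDefaultServePath(undefined, 'https://cdn') -> null (absolute URLs unsupported)
// _buildServePath then normalizes the result to start with '/' and drop any
// trailing slash, e.g. '/base/deploy/' becomes '/base/deploy'.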
private _getBrowserOptions(options: DevServerBuilderOptions) {
const architect = this.context.architect;
const [project, target, configuration] = options.browserTarget.split(':');
const overridesOptions: DevServerBuilderOptionsKeys[] = [
'watch',
'optimization',
'aot',
'sourceMap',
'vendorSourceMap',
'evalSourceMap',
'vendorChunk',
'commonChunk',
'baseHref',
'progress',
'poll',
'verbose',
'deployUrl',
];
// remove options that are undefined or not to be overridden
const overrides = (Object.keys(options) as DevServerBuilderOptionsKeys[])
.filter(key => options[key] !== undefined && overridesOptions.includes(key))
.reduce<Partial<BrowserBuilderSchema>>((previous, key) => (
{
...previous,
[key]: options[key],
}
), {});
const browserTargetSpec = { project, target, configuration, overrides };
const builderConfig = architect.getBuilderConfiguration<BrowserBuilderSchema>(
browserTargetSpec);
return architect.getBuilderDescription(builderConfig).pipe(
concatMap(browserDescription =>
architect.validateBuilderOptions(builderConfig, browserDescription)),
map(browserConfig => browserConfig.options),
);
}
}
export default DevServerBuilder;
| DevServerBuilder |
factory_stub.go | package payloadtest
import (
"github.com/short-d/app/fw"
"github.com/short-d/short/app/entity"
"github.com/short-d/short/app/usecase/auth/payload"
)
var _ payload.Factory = (*FactoryStub)(nil)
// FactoryStub creates payloads based on preset data.
type FactoryStub struct {
Payload payload.Payload
TokenErr error
UserErr error
}
// FromTokenPayload creates payload based on preset payload and error.
func (f FactoryStub) FromTokenPayload(tokenPayload fw.TokenPayload) (payload.Payload, error) {
return f.Payload, f.TokenErr
} | } |
// FromUser creates payload based on preset payload and error.
func (f FactoryStub) FromUser(user entity.User) (payload.Payload, error) {
return f.Payload, f.UserErr |
arguments.py | """
Common arguments for BabyAI training scripts
"""
import os
import argparse
import numpy as np
class ArgumentParser(argparse.ArgumentParser):
def __init__(self):
super().__init__()
# Base arguments
self.add_argument("--env", default=None,
help="name of the environment to train on (REQUIRED)")
self.add_argument("--model", default=None,
help="name of the model (default: ENV_ALGO_TIME)")
self.add_argument("--pretrained-model", default=None,
help='If you\'re using a pre-trained model and want the fine-tuned one to have a new name')
self.add_argument("--seed", type=int, default=1,
help="random seed; if 0, a random random seed will be used (default: 1)")
self.add_argument("--task-id-seed", action='store_true',
help="use the task id within a Slurm job array as the seed")
self.add_argument("--procs", type=int, default=64,
help="number of processes (default: 64)")
self.add_argument("--tb", action="store_true", default=False,
help="log into Tensorboard")
# Training arguments
self.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default(Mathijs): 1, used to be 10)")
self.add_argument("--save-interval", type=int, default=1000,
help="number of updates between two saves (default: 1000, 0 means no saving)")
self.add_argument("--frames", type=int, default=int(9e10),
help="number of frames of training (default: 9e10)")
self.add_argument("--patience", type=int, default=100,
help="patience for early stopping (default: 100)")
self.add_argument("--epochs", type=int, default=1000000,
help="maximum number of epochs")
self.add_argument("--frames-per-proc", type=int, default=40,
help="number of frames per process before update (default: 40)")
self.add_argument("--lr", type=float, default=1e-4,
help="learning rate (default: 1e-4)")
self.add_argument("--beta1", type=float, default=0.9,
help="beta1 for Adam (default: 0.9)")
self.add_argument("--beta2", type=float, default=0.999,
help="beta2 for Adam (default: 0.999)")
self.add_argument("--recurrence", type=int, default=20,
help="number of timesteps gradient is backpropagated (default: 20)")
self.add_argument("--optim-eps", type=float, default=1e-5,
help="Adam and RMSprop optimizer epsilon (default: 1e-5)")
self.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer apha (default: 0.99)")
self.add_argument("--batch-size", type=int, default=1280,
help="batch size for PPO (default: 1280)")
self.add_argument("--entropy-coef", type=float, default=0.01,
help="entropy term coefficient (default: 0.01)")
self.add_argument("--dropout", type=float, default=0.5,
help="dropout probability for processed corrections (default: 0.5)")
self.add_argument("--save-each-epoch", action="store_true", default=False,
help="store model at each epoch")
self.add_argument("--class-weights", action="store_true", default=False,
help="use class weights in loss function")
self.add_argument("--compute-cic", action="store_true", default=False,
help="compute and log causal influence of communication metric after each epoch")
# Model parameters
self.add_argument("--image-dim", type=int, default=128,
help="dimensionality of the image embedding")
self.add_argument("--memory-dim", type=int, default=128,
help="dimensionality of the memory LSTM")
self.add_argument("--instr-dim", type=int, default=128,
help="dimensionality of the memory LSTM")
self.add_argument("--no-instr", action="store_true", default=False,
help="don't use instructions in the model")
self.add_argument("--instr-arch", default="gru",
help="arch to encode instructions, possible values: gru, bigru, conv, bow (default: gru)")
self.add_argument("--no-mem", action="store_true", default=False,
help="don't use memory in the model")
self.add_argument("--arch", default='expert_filmcnn',
help="image embedding architecture")
self.add_argument("--learner", action="store_true", default=False,
help="use ordinary learner")
# Corrector parameters
self.add_argument("--corrector", action="store_true", default=False,
help="use correction module")
self.add_argument("--corr-length", type=int, default=2,
help="length of correction messages (max length if --var-corr-length true)")
self.add_argument("--corr-own-vocab", action="store_true", default=False,
help="corrector uses its own vocabulary instead of instruction vocabulary")
self.add_argument("--corr-embedding-dim", type=int, default=0,
help="embedding dimensionality for corrector")
self.add_argument("--corr-vocab-size", type=int, default=3,
help="vocabulary size of corrector")
self.add_argument("--pretrained-corrector", type=str, default=None,
help="location of pretrained corrector to use and freeze")
self.add_argument("--show-corrections", action="store_true", default=False,
help="show correction messages")
self.add_argument("--corrector-frozen", action="store_true", default=False,
help="freeze pretrained corrector")
self.add_argument("--random-corrector", action="store_true", default=False,
help="randomize correction messages")
self.add_argument("--var-corr-length", action="store_true", default=False,
help="variable length correction messages with penalty for longer ones")
self.add_argument("--corr-loss-coef", type=float, default=0.1,
help="correction loss coefficient (untested default: 0.1)")
self.add_argument("--weigh-corrections", action="store_true", default=False,
help="weigh corrections depending on entropy of previous timestep")
self.add_argument("--correction-weight-loss-coef", type=float, default=1.0,
help="coefficient for correction weight loss")
# Validation parameters
self.add_argument("--val-seed", type=int, default=0,
help="seed for environment used for validation (default: 0)")
self.add_argument("--val-interval", type=int, default=1,
help="number of epochs between two validation checks (default: 1)")
self.add_argument("--val-episodes", type=int, default=500,
help="number of episodes used to evaluate the agent, and to evaluate validation accuracy")
def | (self):
"""
Parse the arguments and perform some basic validation
"""
args = super().parse_args()
# Set seed for all randomness sources
if args.seed == 0:
args.seed = np.random.randint(10000)
if args.task_id_seed:
args.seed = int(os.environ['SLURM_ARRAY_TASK_ID'])
print('set seed to {}'.format(args.seed))
# TODO: more validation
return args
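# Example usage (a sketch; the script and environment names are assumptions):
#     parser = ArgumentParser()
#     args = parser.parse_args()
#     # invoked as e.g.: python train_rl.py --env BabyAI-GoToLocal-v0 --procs 16 --tb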
| parse_args |
get_public_get_instruments_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package public
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
models "github.com/scancel/go-deribit/v3/models"
)
// GetPublicGetInstrumentsReader is a Reader for the GetPublicGetInstruments structure.
type GetPublicGetInstrumentsReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetPublicGetInstrumentsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetPublicGetInstrumentsOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
return nil, runtime.NewAPIError("unknown error", response, response.Code())
}
}
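// Illustrative sketch (not generated code): a go-openapi client submits this
// reader through a runtime.ClientOperation, roughly:
//	op := &runtime.ClientOperation{
//		ID:     "GetPublicGetInstruments",
//		Reader: &GetPublicGetInstrumentsReader{formats: strfmt.Default},
//	}
// Any status other than 200 surfaces from ReadResponse as a *runtime.APIError.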
// NewGetPublicGetInstrumentsOK creates a GetPublicGetInstrumentsOK with default headers values
func NewGetPublicGetInstrumentsOK() *GetPublicGetInstrumentsOK {
return &GetPublicGetInstrumentsOK{}
}
/*GetPublicGetInstrumentsOK handles this case with default header values.
ok response
*/
type GetPublicGetInstrumentsOK struct {
Payload *models.PublicGetInstrumentsResponse
}
func (o *GetPublicGetInstrumentsOK) Error() string {
return fmt.Sprintf("[GET /public/get_instruments][%d] getPublicGetInstrumentsOK %+v", 200, o.Payload)
}
func (o *GetPublicGetInstrumentsOK) GetPayload() *models.PublicGetInstrumentsResponse {
return o.Payload
}
func (o *GetPublicGetInstrumentsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.PublicGetInstrumentsResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF |
return nil
}
| {
return err
} |
xtour-client.js | //
// xtour-client
//
// Version: 2.25
// Build: 1.0.56236
function XTourClient(requestURL, activeRequestLimit) {
"use strict";
if (typeof activeRequestLimit !== "number" || activeRequestLimit <= 0) {
activeRequestLimit = Number.MAX_VALUE;
}
requestURL = findRequestURL(requestURL);
var activateCORS = isCrossDomain(),
sendRequest = selectSendMethod(),
requestHeader = { "Content-Type": "application/json;charset=UTF-8" },
activeRequestCount = 0,
requestQueue = [],
client = this,
toBase64 = (typeof window.btoa === "function") ? window.btoa : function (input) {
var out = "",
end = input.length - 2,
i = 0,
BASE64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=",
c, d, e;
while (i < end) {
c = input.charCodeAt(i++);
d = input.charCodeAt(i++);
e = input.charCodeAt(i++);
out += BASE64.charAt(c >> 2) + BASE64.charAt(((c & 3) << 4) | (d >> 4)) +
BASE64.charAt(((d & 15) << 2) | (e >> 6)) + BASE64.charAt(e & 63);
}
if (i === end) {
c = input.charCodeAt(i++);
d = input.charCodeAt(i);
out += BASE64.charAt(c >> 2) + BASE64.charAt(((c & 3) << 4) | (d >> 4)) + BASE64.charAt((d & 15) << 2) + "=";
} else if (i === end + 1) {
c = input.charCodeAt(i);
out += BASE64.charAt(c >> 2) + BASE64.charAt(((c & 3) << 4)) + "==";
}
return out;
};
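// Worked example for the fallback encoder above: toBase64("user:pass")
// returns "dXNlcjpwYXNz", matching window.btoa for ASCII input (like btoa,
// it is only meant for code points below 256).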
function findRequestURL(requestURL) {
if (!requestURL || typeof requestURL !== "string") {
return findBaseURL() + "rs/XTour/";
} else if(startsWith(requestURL, "http")) {
return requestURL;
} else {
return findBaseURL() + "rs/XTour/" + requestURL + "/";
}
}
function startsWith(str, prefix) {
return str.substring(0, prefix.length) === prefix;
}
function findBaseURL() {
var scripts = document.getElementsByTagName("script");
for (var i = scripts.length - 1; i >= 0; i--) {
if (scripts[i].src) {
var urlParts = scripts[i].src.match(/(.*\/|)([^\/\.]*)/);
if (urlParts[2] === "xtour-client") {
return urlParts[1];
}
}
}
return null;
}
function isCrossDomain() {
var o = window.location;
var a = document.createElement("a");
a.href = requestURL;
return (a.protocol === "http:" || a.protocol === "https:") &&
(o.protocol !== a.protocol || o.port !== a.port || (o.hostname !== a.hostname && document.domain !== a.hostname));
}
function selectSendMethod() {
var hasXMLHttpRequest = (typeof XMLHttpRequest !== "undefined");
if (activateCORS) {
if (hasXMLHttpRequest) {
if ("withCredentials" in new XMLHttpRequest()) {
return sendXMLHttpRequest;
} else if (typeof window.XDomainRequest !== "undefined") {
return sendXDomainRequest;
}
}
throw new Error("This browser does not support CORS.");
} else {
if (!hasXMLHttpRequest) {
try {
new window.ActiveXObject("Microsoft.XMLHTTP");
} catch (ex) {
throw new Error("This browser does not support Ajax.");
}
}
return sendXMLHttpRequest;
}
}
function sendXMLHttpRequest(path, request, callbackHandler, timeout) {
var async = (typeof callbackHandler === "function"), xhr;
if (async && activeRequestCount >= activeRequestLimit) {
requestQueue.push({ path: path, request: request, callbackHandler: callbackHandler, timeout: timeout });
return null;
}
xhr = (typeof XMLHttpRequest !== "undefined") ? new XMLHttpRequest() : new window.ActiveXObject("Microsoft.XMLHTTP");
xhr.open("POST", requestURL + path, async);
for (var fieldName in requestHeader) {
if (requestHeader.hasOwnProperty(fieldName)) {
xhr.setRequestHeader(fieldName, requestHeader[fieldName]);
}
}
if (async) {
xhr.onreadystatechange = function() {
if (xhr.readyState === 4) {
var responseObject = xhr.responseText && JSON.parse(xhr.responseText);
if (xhr.status === 200) {
callbackHandler(responseObject, null, xhr);
} else {
callbackHandler(null, responseObject, xhr);
}
responseArrived();
}
};
if (timeout) {
xhr.timeout = timeout;
xhr.ontimeout = function() {
callbackHandler(null, null, xhr);
responseArrived();
};
}
xhr.send(JSON.stringify(request));
activeRequestCount++;
return xhr;
} else {
xhr.send(JSON.stringify(request));
var responseObject = xhr.responseText && JSON.parse(xhr.responseText);
if (xhr.status === 200) {
return responseObject;
} else {
throw responseObject;
}
}
}
function sendXDomainRequest(path, request, callbackHandler, timeout) {
if (typeof callbackHandler !== "function") {
throw new Error("This browser does not support CORS with synchroneous requests.");
}
if (activeRequestCount >= activeRequestLimit) {
requestQueue.push({ path: path, request: request, callbackHandler: callbackHandler, timeout: timeout });
return null;
}
var xhr = new window.XDomainRequest();
xhr.open("POST", requestURL + path);
xhr.onload = function() {
var responseObject = xhr.responseText && JSON.parse(xhr.responseText);
callbackHandler(responseObject, null, xhr);
responseArrived();
};
xhr.onerror = function() {
var responseObject = xhr.responseText && JSON.parse(xhr.responseText);
callbackHandler(null, responseObject, xhr);
responseArrived();
};
if (timeout) {
xhr.timeout = timeout;
xhr.ontimeout = function() {
callbackHandler(null, null, xhr);
responseArrived();
};
}
xhr.send(JSON.stringify(request));
activeRequestCount++;
return xhr;
}
function responseArrived() {
var pendingRequest;
activeRequestCount--;
if (activeRequestCount < activeRequestLimit && requestQueue.length) {
pendingRequest = requestQueue.shift();
sendRequest(pendingRequest.path, pendingRequest.request, pendingRequest.callbackHandler, pendingRequest.timeout);
}
}
/**
* Returns the number of currently active http requests.
* @returns {number}
*/
this.getActiveRequestCount = function () {
return activeRequestCount;
};
/**
* Returns the current limit for the number of active http requests.
* @returns {number}
*/
this.getActiveRequestLimit = function () {
return activeRequestLimit;
};
/**
* Sets the current limit for the number of active http requests.
* @param {number} [limit] The maximum number of active http requests to send. A non-positive or omitted number sets the limit to unrestricted.
*/
this.setActiveRequestLimit = function (limit) {
activeRequestLimit = (typeof limit === "number" && limit >= 1) ? Math.floor(limit) : Number.MAX_VALUE;
};
/**
* Returns the number of http requests that are queued because they would have exceeded the request limit.
* @returns {number}
*/
this.getPendingRequestCount = function () {
return requestQueue.length;
};
/**
* Clears the queue of pending http requests, preventing them from being fired.
*/
this.cancelPendingRequests = function () {
requestQueue.length = 0;
};
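/*
 * Example (a sketch; the service URL is an assumption): limit the client to
 * two concurrent requests, queueing the rest:
 *   var client = new XTourClient("https://example.com/xtour/rs/XTour/", 2);
 *   // or later: client.setActiveRequestLimit(2);
 *   // Excess calls wait in the queue and show up in getPendingRequestCount().
 */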
/**
* Sets a http request header field.
* @param {string} fieldName the name of the request header field to set.
* @param {string} fieldValue the value for the request header field.
* @throws {Error} if a CORS request would be needed for an older IE browser.
*/
this.setRequestHeader = function (fieldName, fieldValue) {
if (sendRequest === sendXDomainRequest) {
throw new Error("This browser does not support CORS with custom request headers.");
}
requestHeader[fieldName] = fieldValue;
};
/**
* Returns the current value of a http request header field.
* @param {string} fieldName the name of the request header field.
* @returns {string} the value of the request header field, or undefined.
*/
this.getRequestHeader = function (fieldName) {
return requestHeader[fieldName];
};
/**
* Sets the http request credentials (user, password) to be used. Set to null to deactivate authentication.
* @param {string} user the user name.
* @param {string} passwd the password.
* @throws {Error} if a CORS request would be needed for an older IE browser.
*/
this.setCredentials = function (user, passwd) {
if (user || passwd) {
if (sendRequest === sendXDomainRequest) {
throw new Error("This browser does not support CORS with basic authentication.");
}
/*
We set the basic authentication header manually and avoid setting withCredentials.
This has the decisive advantage that it actually works with current browsers.
if (activateCORS) xhr.withCredentials = true;
*/
requestHeader["Authorization"] = "Basic " + toBase64(user + ":" + passwd);
} else {
delete requestHeader["Authorization"];
}
};
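/*
 * Example: client.setCredentials("user", "pass") makes every request carry
 * the header "Authorization: Basic dXNlcjpwYXNz"; calling
 * client.setCredentials(null, null) removes the header again.
 */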
/**
* Returns the service URL the requests will be sent to after attaching the operation name.
* @returns {string}
*/
this.getRequestURL = function () {
return requestURL;
};
/*
* Generic job runner.
*/
function runJob(startFunction, startFunctionParameters, fetchFunction, onResult, watchRequest, onUpdate, retryOptions) {
if (typeof onResult !== "function") {
throw new Error("Result callback required");
}
if (typeof retryOptions !== "object") {
retryOptions = {};
}
if (typeof retryOptions.retries !== "number") {
retryOptions.retries = 3;
}
if (typeof retryOptions.retryInterval !== "number") {
retryOptions.retryInterval = 1000;
}
if (typeof retryOptions.retryStatusCodes !== "object") {
retryOptions.retryStatusCodes = [0, 502, 503, 504];
}
var tries = 0,
lastKnownJob = null;
startFunctionParameters.push(onStarted);
function onStarted(job, err, xhr) {
if (job === null) {
onResult(null, err, xhr);
} else {
lastKnownJob = job;
watchRequest.id = job.id;
client.watchJob(watchRequest, statusAvailable);
}
}
function statusAvailable(job, err, xhr) {
if (job !== null) {
tries = 0;
lastKnownJob = job;
if (typeof onUpdate === "function") {
onUpdate(job, null, xhr);
}
if (job.status === "QUEUING" || job.status === "RUNNING" || job.status === "STOPPING") {
watchRequest.id = job.id;
client.watchJob(watchRequest, statusAvailable);
} else {
fetchFunction(job.id, null, resultAvailable);
| } else {
if (tries < retryOptions.retries && isPotentiallyRecoverable(err, xhr)) {
tries++;
if (typeof onUpdate === "function") {
onUpdate(null, err, xhr);
}
setTimeout(function () {
watchRequest.id = lastKnownJob.id;
client.watchJob(watchRequest, statusAvailable);
}, retryOptions.retryInterval);
} else {
onResult(null, err, xhr);
}
}
}
function resultAvailable(res, err, xhr) {
if (res !== null) {
onResult(res, null, xhr);
} else {
if (tries < retryOptions.retries && isPotentiallyRecoverable(err, xhr)) {
tries++;
if (typeof onUpdate === "function") {
onUpdate(null, err, xhr);
}
setTimeout(function () {
fetchFunction(lastKnownJob.id, null, resultAvailable);
}, retryOptions.retryInterval);
} else {
onResult(null, err, xhr);
}
}
}
function isPotentiallyRecoverable(err, xhr) {
if (err === null) {
return true;
}
if (xhr && retryOptions.retryStatusCodes) {
for (var i = 0; i < retryOptions.retryStatusCodes.length; i++) {
if (retryOptions.retryStatusCodes[i] === xhr.status) {
return true;
}
}
}
return false;
}
return startFunction.apply(client, startFunctionParameters);
}
/**
* The response handler that is called when a http request has returned.
* @callback ResponseCallback
* @param {object} response the response object, or null if there was an error or timeout.
* @param {object} exception the exception object, or null if there was no problem or a client timeout. In case of a client timeout, both response and exception are null.
* @param {object} xhr the XMLHttpRequest object used for the request.
*/
/**
* Options for the retry behavior of the job runner functions. All properties including the object itself are optional.
* @typedef {object} RetryOptions
* @property {number} [retries] - the maximum number of retries to attempt after failed watch or fetch requests. Will be reset after recovery. Default is 3.
* @property {number} [retryInterval] - the waiting period in ms before the next retry attempt. Default is 1000.
* @property {number[]} [retryStatusCodes] - the array of HTTP status codes that will trigger retries for watchJob and fetch; default is [0,502,503,504]
*/
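// Example RetryOptions value (a sketch): retry up to 5 times, every 2 s, and
// additionally treat HTTP 500 as recoverable:
//   { retries: 5, retryInterval: 2000, retryStatusCodes: [0, 500, 502, 503, 504] }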
/**
* This function is called when a job update is available.
* @callback JobUpdateCallback
* @param {Object} job the Job information object, or null if the job information could not be fetched
* @param {Object} error an error description object, or null if there was no error or the watch timed out on client side
* @param {Object} xhr the XmlHttpRequest object, or null in case of cancelled pending requests (error === "abort")
*/
/**
* For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [handler] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the response object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.planTours = function (request , handler, timeout) {
return sendRequest("planTours", request, handler, timeout || this.planTours.timeout);
};
/**
* Job starter service operation. For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.startPlanTours = function (request , handler, timeout) {
return sendRequest("startPlanTours", request, handler, timeout || this.startPlanTours.timeout);
};
/**
* Job runner convenience method that handles start, watching and fetching for the associated job operation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} handler - the callback for the final job result; this callback is required.
* @param {object} [watchRequest] - options passed to watchJob calls.
* @param {JobUpdateCallback} [onUpdate] - callback for job status updates; called for major state changes and progress updates if activated in watchRequest.
* @param {RetryOptions} [retryOptions] - options for watch / fetch retries.
* @returns {object} the XMLHttpRequest object used, or null if the active request limit has been reached.
*/
this.runPlanTours = function (request , handler, watchRequest, onUpdate, retryOptions) {
return runJob(client.startPlanTours, request, client.fetchToursResponse, handler, watchRequest, onUpdate, retryOptions);
};
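/*
 * Example (a sketch; request fields follow the PTV xTour API and are not
 * shown). Note that runJob applies its parameters, so the leading parameters
 * are passed as an array:
 *   client.runPlanTours([request], function (result, err, xhr) {
 *     if (result) { console.log("tours planned"); } else { console.error(err); }
 *   }, {}, function (job) { if (job) { console.log(job.status); } });
 */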
/**
* For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [handler] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the response object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.changeTours = function (request , handler, timeout) {
return sendRequest("changeTours", request, handler, timeout || this.changeTours.timeout);
};
/**
* Job starter service operation. For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.startChangeTours = function (request , handler, timeout) {
return sendRequest("startChangeTours", request, handler, timeout || this.startChangeTours.timeout);
};
/**
* Job runner convenience method that handles start, watching and fetching for the associated job operation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} handler - the callback for the final job result; this callback is required.
* @param {object} [watchRequest] - options passed to watchJob calls.
* @param {JobUpdateCallback} [onUpdate] - callback for job status updates; called for major state changes and progress updates if activated in watchRequest.
* @param {RetryOptions} [retryOptions] - options for watch / fetch retries.
* @returns {object} the XMLHttpRequest object used, or null if the active request limit has been reached.
*/
this.runChangeTours = function (request , handler, watchRequest, onUpdate, retryOptions) {
return runJob(client.startChangeTours, request, client.fetchToursResponse, handler, watchRequest, onUpdate, retryOptions);
};
/**
* For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [handler] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the response object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.findChangeToursProposals = function (request , handler, timeout) {
return sendRequest("findChangeToursProposals", request, handler, timeout || this.findChangeToursProposals.timeout);
};
/**
* Job starter service operation. For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.startFindChangeToursProposals = function (request , handler, timeout) {
return sendRequest("startFindChangeToursProposals", request, handler, timeout || this.startFindChangeToursProposals.timeout);
};
/**
* Job runner convenience method that handles start, watching and fetching for the associated job operation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} handler - the callback for the final job result; this callback is required.
* @param {object} [watchRequest] - options passed to watchJob calls.
* @param {JobUpdateCallback} [onUpdate] - callback for job status updates; called for major state changes and progress updates if activated in watchRequest.
* @param {RetryOptions} [retryOptions] - options for watch / fetch retries.
* @returns {object} the XMLHttpRequest object used, or null if the active request limit has been reached.
*/
this.runFindChangeToursProposals = function (request , handler, watchRequest, onUpdate, retryOptions) {
return runJob(client.startFindChangeToursProposals, request, client.fetchChangeToursProposalsResponse, handler, watchRequest, onUpdate, retryOptions);
};
/**
* For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [handler] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the response object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.evaluateToursInExecution = function (request , handler, timeout) {
return sendRequest("evaluateToursInExecution", request, handler, timeout || this.evaluateToursInExecution.timeout);
};
/**
* Job starter service operation. For the operation itself and its leading parameters please refer to PTV xServer API documentation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.startEvaluateToursInExecution = function (request , handler, timeout) {
return sendRequest("startEvaluateToursInExecution", request, handler, timeout || this.startEvaluateToursInExecution.timeout);
};
/**
* Job runner convenience method that handles start, watching and fetching for the associated job operation.
* @param {? extends RequestBase} request - please refer to PTV xServer API documentation for the request parameters.
* @param {ResponseCallback} handler - the callback for the final job result; this callback is required.
* @param {object} [watchRequest] - options passed to watchJob calls.
* @param {JobUpdateCallback} [onUpdate] - callback for job status updates; called for major state changes and progress updates if activated in watchRequest.
* @param {RetryOptions} [retryOptions] - options for watch / fetch retries.
* @returns {object} the XMLHttpRequest object used, or null if the active request limit has been reached.
*/
this.runEvaluateToursInExecution = function (request , handler, watchRequest, onUpdate, retryOptions) {
return runJob(client.startEvaluateToursInExecution, request, client.fetchToursResponse, handler, watchRequest, onUpdate, retryOptions);
};
/**
* Job result fetcher service operation.
* @param {string} id - the unique job id to fetch.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the result of this job, which can be an error.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.fetchChangeToursProposalsResponse = function (id, onResult, timeout) {
return sendRequest("fetchChangeToursProposalsResponse", { id: id }, onResult, timeout || client.fetchChangeToursProposalsResponse.timeout);
};
/**
* Job result fetcher service operation.
* @param {string} id - the unique job id to fetch.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the result of this job, which can be an error.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.fetchToursResponse = function (id, onResult, timeout) {
return sendRequest("fetchToursResponse", { id: id }, onResult, timeout || client.fetchToursResponse.timeout);
};
/**
* Job watcher service operation.
* @param {object} [watchRequest] options passed to watchJob calls.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.watchJob = function (watchRequest, onResult, timeout) {
return sendRequest("watchJob", watchRequest, onResult, timeout || client.watchJob.timeout);
};
/**
* Service operation attempting to stop a job.
* @param {object} jobRequest - contains the unique job id to stop.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.stopJob = function (jobRequest, onResult, timeout) {
return sendRequest("stopJob", jobRequest, onResult, timeout || client.stopJob.timeout);
};
/**
* Service operation to delete a job.
* @param {object} jobRequest - contains the unique job id to delete.
* @param {ResponseCallback} [onResult] the callback to be used; if omitted, the request will be sent synchronously.
* @param {number} [timeout] the client timeout for the request in ms; if omitted, the default value of this operation's static timeout variable will be used; the default is zero, indicating no timeout.
* @returns {object} in the (recommended) asynchronous mode, the XMLHttpRequest object used, or null if the active request limit has been reached. In the synchronous mode, the job object.
* @throws {object} only in the synchronous mode: the error message from the server, or an Error object if a CORS request would be needed for an older IE browser.
*/
this.deleteJob = function (jobRequest, onResult, timeout) {
return sendRequest("deleteJob", jobRequest, onResult, timeout || client.deleteJob.timeout);
};
} | }
|
0011_auto_20200722_1741.py | # Generated by Django 3.0.8 on 2020-07-22 12:11
from django.db import migrations
class | (migrations.Migration):
dependencies = [
('Home', '0010_auto_20200722_1738'),
]
operations = [
migrations.RenameField(
model_name='student',
old_name='fathermobileno',
new_name='father_mobile_no',
),
migrations.RenameField(
model_name='student',
old_name='fathername',
new_name='father_name',
),
migrations.RenameField(
model_name='student',
old_name='mobileno',
new_name='mobile_no',
),
migrations.RenameField(
model_name='student',
old_name='rollno',
new_name='roll_no',
),
]
| Migration |
trace_macros.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use syntax::ext::base::ExtCtxt;
use syntax::ext::base;
use syntax::feature_gate;
use syntax::parse::token::keywords;
use syntax_pos::Span;
use syntax::tokenstream::TokenTree;
pub fn expand_trace_macros(cx: &mut ExtCtxt,
sp: Span,
tt: &[TokenTree])
-> Box<base::MacResult + 'static> | {
if !cx.ecfg.enable_trace_macros() {
feature_gate::emit_feature_err(&cx.parse_sess.span_diagnostic,
"trace_macros",
sp,
feature_gate::GateIssue::Language,
feature_gate::EXPLAIN_TRACE_MACROS);
return base::DummyResult::any(sp);
}
match (tt.len(), tt.first()) {
(1, Some(&TokenTree::Token(_, ref tok))) if tok.is_keyword(keywords::True) => {
cx.set_trace_macros(true);
}
(1, Some(&TokenTree::Token(_, ref tok))) if tok.is_keyword(keywords::False) => {
cx.set_trace_macros(false);
}
_ => cx.span_err(sp, "trace_macros! accepts only `true` or `false`"),
}
base::DummyResult::any(sp)
} |
|
main.rs | #[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
use crate::args::{ClientSubcommand, KeysSubcommand, SubCommand};
use crate::config::init_config;
use crate::keys::show_keys;
use crate::{orchestrator::orchestrator, relayer::relayer};
use args::Opts;
use clap::Parser;
use client::cosmos_to_eth::cosmos_to_eth;
use client::deploy_erc20_representation::deploy_erc20_representation;
use client::eth_to_cosmos::eth_to_cosmos;
use config::{get_home_dir, load_config};
use env_logger::Env;
use keys::register_orchestrator_address::register_orchestrator_address;
use keys::set_eth_key;
use keys::set_orchestrator_key;
mod args;
mod client;
mod config;
mod keys;
mod orchestrator;
mod relayer;
mod utils;
#[actix_rt::main]
async fn | () {
env_logger::Builder::from_env(Env::default().default_filter_or("info")).init();
// On Linux static builds we need to probe the SSL certificate paths to be
// able to do TLS.
openssl_probe::init_ssl_cert_env_vars();
// parse the arguments
let opts: Opts = Opts::parse();
// handle global config here
let address_prefix = opts.address_prefix;
let home_dir = get_home_dir(opts.home);
let config = load_config(&home_dir);
// control flow for the command structure
match opts.subcmd {
SubCommand::Client(client_opts) => match client_opts.subcmd {
ClientSubcommand::EthToCosmos(eth_to_cosmos_opts) => {
eth_to_cosmos(eth_to_cosmos_opts, address_prefix).await
}
ClientSubcommand::CosmosToEth(cosmos_to_eth_opts) => {
cosmos_to_eth(cosmos_to_eth_opts, address_prefix).await
}
ClientSubcommand::DeployErc20Representation(deploy_erc20_opts) => {
deploy_erc20_representation(deploy_erc20_opts, address_prefix).await
}
},
SubCommand::Keys(key_opts) => match key_opts.subcmd {
KeysSubcommand::RegisterOrchestratorAddress(set_orchestrator_address_opts) => {
register_orchestrator_address(
set_orchestrator_address_opts,
address_prefix,
home_dir,
)
.await
}
KeysSubcommand::Show => show_keys(&home_dir, &address_prefix),
KeysSubcommand::SetEthereumKey(set_eth_key_opts) => {
set_eth_key(&home_dir, set_eth_key_opts)
}
KeysSubcommand::SetOrchestratorKey(set_orch_key_opts) => {
set_orchestrator_key(&home_dir, set_orch_key_opts)
}
},
SubCommand::Orchestrator(orchestrator_opts) => {
orchestrator(orchestrator_opts, address_prefix, &home_dir, config).await
}
SubCommand::Relayer(relayer_opts) => {
relayer(relayer_opts, address_prefix, &home_dir, &config.relayer).await
}
SubCommand::Init(init_opts) => init_config(init_opts, home_dir),
}
}
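// Example invocations (a sketch; the binary name `gbt` and the kebab-case
// subcommand spellings derived by clap are assumptions):
//   gbt init
//   gbt keys show
//   gbt --address-prefix gravity orchestrator ...
// Each line maps onto one arm of the match in `main` above.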
| main |
main.go | package main
import (
"log"
"math"
"github.com/unixpickle/model3d/model2d"
"github.com/unixpickle/model3d/model3d"
"github.com/unixpickle/model3d/render3d"
)
const (
Height = 4.0 | Radius = 0.3
BaseWidth = 3.0
BaseDepth = 2.0
)
func main() {
log.Println("Creating solid...")
solid := model3d.JoinedSolid{
&model3d.Rect{
MinVal: model3d.XYZ(-BaseWidth/2, -BaseDepth/2, -Radius),
MaxVal: model3d.XYZ(BaseWidth/2, BaseDepth/2, Radius),
},
&TubeSolid{
Curve: model2d.MeshToCollider(Mesh2D(PolyPiecewiseCurve)),
},
&TubeSolid{
Curve: model2d.MeshToCollider(Mesh2D(SinusoidalCurve)),
},
}
log.Println("Creating mesh...")
mesh := model3d.MarchingCubesSearch(solid, 0.01, 8)
log.Println("Saving results...")
mesh.SaveGroupedSTL("curvy_thing.stl")
render3d.SaveRandomGrid("rendering.png", mesh, 3, 3, 300, nil)
}
type TubeSolid struct {
Curve model2d.Collider
}
func (t TubeSolid) Min() model3d.Coord3D {
return model3d.XYZ(t.Curve.Min().Y-Radius, -Radius, t.Curve.Min().X-Radius)
}
func (t TubeSolid) Max() model3d.Coord3D {
return model3d.XYZ(t.Curve.Max().Y+Radius, Radius, t.Curve.Max().X)
}
func (t TubeSolid) Contains(c model3d.Coord3D) bool {
if !model3d.InBounds(t, c) {
return false
}
c2d := model2d.Coord{X: c.Z, Y: c.X}
if math.Abs(c.Y) > Radius {
return false
}
radius := math.Sqrt(Radius*Radius - c.Y*c.Y)
return t.Curve.CircleCollision(c2d, radius)
}
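// Why the square root above is correct (sketch of the geometry): a point at
// height y inside a tube of radius R lies in the solid iff its 2D projection
// is within sqrt(R*R - y*y) of the swept curve, i.e. the radius of the
// circular cross-section at offset y. At y = 0 the slice radius is the full
// R = 0.3; at |y| = R it shrinks to zero.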
func Mesh2D(f func(float64) float64) *model2d.Mesh {
res := model2d.NewMesh()
for z := 0.0; z+0.01 < Height; z += 0.01 {
p1 := model2d.Coord{X: z, Y: f(z)}
p2 := model2d.Coord{X: z + 0.01, Y: f(z + 0.01)}
res.Add(&model2d.Segment{p1, p2})
}
return res
}
func SinusoidalCurve(z float64) float64 {
return -0.3*math.Sin(8-2*z)*math.Sqrt(z) + 0.6
}
func PolyPiecewiseCurve(z float64) float64 {
if z < 2 {
return 0.5*math.Pow(z, 2)*(z-2) - 0.4
} else {
return -PolyPiecewiseCurve(4-z) - 0.8
}
} | |
sl.js | /*
Copyright (c) 2003-2021, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/legal/ckeditor-oss-license
*/ | CKEDITOR.lang['sl']={"widget":{"move":"Kliknite in povlecite, da premaknete","label":"%1 widget"},"uploadwidget":{"abort":"Upload aborted by the page.","doneOne":"File successfully uploaded.","doneMany":"Successfully uploaded %1 files.","uploadOne":"Uploading file ({percentage}%)...","uploadMany":"Uploading files, {current} of {max} done ({percentage}%)..."},"undo":{"redo":"Uveljavi","undo":"Razveljavi"},"toolbar":{"toolbarCollapse":"Skrči orodno vrstico","toolbarExpand":"Razširi orodno vrstico","toolbarGroups":{"document":"Dokument","clipboard":"Odložišče/Razveljavi","editing":"Urejanje","forms":"Obrazci","basicstyles":"Osnovni slogi","paragraph":"Odstavek","links":"Povezave","insert":"Vstavi","styles":"Slogi","colors":"Barve","tools":"Orodja"},"toolbars":"Orodne vrstice urejevalnika"},"table":{"border":"Velikost obrobe","caption":"Napis","cell":{"menu":"Celica","insertBefore":"Vstavi celico pred","insertAfter":"Vstavi celico za","deleteCell":"Izbriši celice","merge":"Združi celice","mergeRight":"Združi desno","mergeDown":"Združi navzdol","splitHorizontal":"Razdeli celico vodoravno","splitVertical":"Razdeli celico navpično","title":"Lastnosti celice","cellType":"Vrsta celice","rowSpan":"Razpon vrstic","colSpan":"Razpon stolpcev","wordWrap":"Prelom besedila","hAlign":"Vodoravna poravnava","vAlign":"Navpična poravnava","alignBaseline":"Osnovnica","bgColor":"Barva ozadja","borderColor":"Barva obrobe","data":"Podatki","header":"Glava","yes":"Da","no":"Ne","invalidWidth":"Širina celice mora biti število.","invalidHeight":"Višina celice mora biti število.","invalidRowSpan":"Razpon vrstic mora biti celo število.","invalidColSpan":"Razpon stolpcev mora biti celo število.","chooseColor":"Izberi"},"cellPad":"Odmik znotraj celic","cellSpace":"Razmik med celicami","column":{"menu":"Stolpec","insertBefore":"Vstavi stolpec pred","insertAfter":"Vstavi stolpec za","deleteColumn":"Izbriši stolpce"},"columns":"Stolpci","deleteTable":"Izbriši tabelo","headers":"Glave","headersBoth":"Oboje","headersColumn":"Prvi stolpec","headersNone":"Brez","headersRow":"Prva vrstica","heightUnit":"height unit","invalidBorder":"Širina obrobe mora biti število.","invalidCellPadding":"Odmik znotraj celic mora biti pozitivno število.","invalidCellSpacing":"Razmik med celicami mora biti pozitivno število.","invalidCols":"Število stolpcev mora biti večje od 0.","invalidHeight":"Višina tabele mora biti število.","invalidRows":"Število vrstic mora biti večje od 0.","invalidWidth":"Širina tabele mora biti število.","menu":"Lastnosti tabele","row":{"menu":"Vrstica","insertBefore":"Vstavi vrstico pred","insertAfter":"Vstavi vrstico za","deleteRow":"Izbriši vrstice"},"rows":"Vrstice","summary":"Povzetek","title":"Lastnosti tabele","toolbar":"Tabela","widthPc":"odstotkov","widthPx":"pik","widthUnit":"enota širine"},"stylescombo":{"label":"Slog","panelTitle":"Oblikovalni Stili","panelTitle1":"Slogi odstavkov","panelTitle2":"Slogi besedila","panelTitle3":"Slogi objektov"},"specialchar":{"options":"Možnosti posebnih znakov","title":"Izberi posebni znak","toolbar":"Vstavi posebni znak"},"sourcearea":{"toolbar":"Izvorna koda"},"scayt":{"btn_about":"O storitvi SCAYT","btn_dictionaries":"Slovarji","btn_disable":"Onemogoči SCAYT","btn_enable":"Omogoči SCAYT","btn_langs":"Jeziki","btn_options":"Možnosti","text_title":"Črkovanje med tipkanjem"},"removeformat":{"toolbar":"Odstrani oblikovanje"},"pastetext":{"button":"Prilepi kot golo besedilo","pasteNotification":"Press %1 to paste. 
Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Prilepi kot golo besedilo"},"pastefromword":{"confirmCleanup":"Besedilo, ki ga želite prilepiti, je kopirano iz Worda. Ali ga želite očistiti, preden ga prilepite?","error":"Ni bilo mogoče očistiti prilepljenih podatkov zaradi notranje napake","title":"Prilepi iz Worda","toolbar":"Prilepi iz Worda"},"notification":{"closed":"Notification closed."},"maximize":{"maximize":"Maksimiraj","minimize":"Minimiraj"},"magicline":{"title":"Vstavite odstavek tukaj"},"list":{"bulletedlist":"Vstavi/odstrani neoštevilčen seznam","numberedlist":"Vstavi/odstrani oštevilčen seznam"},"link":{"acccessKey":"Tipka za dostop","advanced":"Napredno","advisoryContentType":"Predlagana vrsta vsebine","advisoryTitle":"Predlagani naslov","anchor":{"toolbar":"Sidro","menu":"Uredi sidro","title":"Lastnosti sidra","name":"Ime sidra","errorName":"Prosimo, vnesite ime sidra","remove":"Odstrani sidro"},"anchorId":"Po ID-ju elementa","anchorName":"Po imenu sidra","charset":"Nabor znakov povezanega vira","cssClasses":"Razredi slogovne predloge","download":"Force Download","displayText":"Display Text","emailAddress":"E-poštni naslov","emailBody":"Telo sporočila","emailSubject":"Zadeva sporočila","id":"Id","info":"Podatki o povezavi","langCode":"Koda jezika","langDir":"Smer jezika","langDirLTR":"Od leve proti desni (LTR)","langDirRTL":"Od desne proti levi (RTL)","menu":"Uredi povezavo","name":"Ime","noAnchors":"(V tem dokumentu ni sider)","noEmail":"Vnesite e-poštni naslov","noUrl":"Vnesite URL povezave","noTel":"Please type the phone number","other":"<drugo>","phoneNumber":"Phone number","popupDependent":"Podokno (Netscape)","popupFeatures":"Značilnosti pojavnega okna","popupFullScreen":"Celozaslonsko (IE)","popupLeft":"Lega levo","popupLocationBar":"Naslovna vrstica","popupMenuBar":"Menijska vrstica","popupResizable":"Spremenljive velikosti","popupScrollBars":"Drsniki","popupStatusBar":"Vrstica stanja","popupToolbar":"Orodna vrstica","popupTop":"Lega na vrhu","rel":"Odnos","selectAnchor":"Izberite sidro","styles":"Slog","tabIndex":"Številka tabulatorja","target":"Cilj","targetFrame":"<okvir>","targetFrameName":"Ime ciljnega okvirja","targetPopup":"<pojavno okno>","targetPopupName":"Ime pojavnega okna","title":"Povezava","toAnchor":"Sidro na tej strani","toEmail":"E-pošta","toUrl":"URL","toPhone":"Phone","toolbar":"Vstavi/uredi povezavo","type":"Vrsta povezave","unlink":"Odstrani povezavo","upload":"Naloži"},"indent":{"indent":"Povečaj zamik","outdent":"Zmanjšaj zamik"},"image":{"alt":"Nadomestno besedilo","border":"Obroba","btnUpload":"Pošlji na strežnik","button2Img":"Želite pretvoriti izbrani gumb s sliko v preprosto sliko?","hSpace":"Vodoravni odmik","img2Button":"Želite pretvoriti izbrano sliko v gumb s sliko?","infoTab":"Podatki o sliki","linkTab":"Povezava","lockRatio":"Zakleni razmerje","menu":"Lastnosti slike","resetSize":"Ponastavi velikost","title":"Lastnosti slike","titleButton":"Lastnosti gumba s sliko","upload":"Naloži","urlMissing":"Manjka URL vira slike.","vSpace":"Navpični odmik","validateBorder":"Meja mora biti celo število.","validateHSpace":"Vodoravni odmik mora biti celo število.","validateVSpace":"VSpace mora biti celo število."},"horizontalrule":{"toolbar":"Vstavi vodoravno črto"},"format":{"label":"Oblika","panelTitle":"Oblika odstavka","tag_address":"Napis","tag_div":"Navaden (DIV)","tag_h1":"Naslov 1","tag_h2":"Naslov 2","tag_h3":"Naslov 3","tag_h4":"Naslov 4","tag_h5":"Naslov 5","tag_h6":"Naslov 
6","tag_p":"Navaden","tag_pre":"Oblikovan"},"filetools":{"loadError":"Error occurred during file read.","networkError":"Network error occurred during file upload.","httpError404":"HTTP error occurred during file upload (404: File not found).","httpError403":"HTTP error occurred during file upload (403: Forbidden).","httpError":"HTTP error occurred during file upload (error status: %1).","noUrlError":"Upload URL is not defined.","responseError":"Incorrect server response."},"fakeobjects":{"anchor":"Sidro","flash":"Animacija flash","hiddenfield":"Skrito polje","iframe":"IFrame","unknown":"Neznan objekt"},"elementspath":{"eleLabel":"Pot elementov","eleTitle":"Element %1"},"contextmenu":{"options":"Možnosti kontekstnega menija"},"clipboard":{"copy":"Kopiraj","copyError":"Varnostne nastavitve brskalnika ne dopuščajo samodejnega kopiranja. Uporabite kombinacijo tipk na tipkovnici (Ctrl/Cmd+C).","cut":"Izreži","cutError":"Varnostne nastavitve brskalnika ne dopuščajo samodejnega izrezovanja. Uporabite kombinacijo tipk na tipkovnici (Ctrl/Cmd+X).","paste":"Prilepi","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Prilepi območje","pasteMsg":"Paste your content inside the area below and press OK."},"blockquote":{"toolbar":"Citat"},"basicstyles":{"bold":"Krepko","italic":"Ležeče","strike":"Prečrtano","subscript":"Podpisano","superscript":"Nadpisano","underline":"Podčrtano"},"about":{"copy":"Copyright © $1. Vse pravice pridržane.","dlgTitle":"O programu CKEditor 4","moreInfo":"Za informacije o licenciranju prosimo obiščite našo spletno stran:"},"editor":"Urejevalnik obogatenega besedila","editorPanel":"Plošča urejevalnika obogatenega besedila","common":{"editorHelp":"Pritisnite ALT 0 za pomoč","browseServer":"Prebrskaj na strežniku","url":"URL","protocol":"Protokol","upload":"Naloži","uploadSubmit":"Pošlji na strežnik","image":"Slika","flash":"Flash","form":"Obrazec","checkbox":"Potrditveno polje","radio":"Izbirno polje","textField":"Besedilno polje","textarea":"Besedilno območje","hiddenField":"Skrito polje","button":"Gumb","select":"Spustno polje","imageButton":"Slikovni gumb","notSet":"<ni določen>","id":"Id","name":"Ime","langDir":"Smer jezika","langDirLtr":"Od leve proti desni (LTR)","langDirRtl":"Od desne proti levi (RTL)","langCode":"Koda jezika","longDescr":"Dolg opis URL-ja","cssClass":"Razredi slogovne predloge","advisoryTitle":"Predlagani naslov","cssStyle":"Slog","ok":"V redu","cancel":"Prekliči","close":"Zapri","preview":"Predogled","resize":"Potegni za spremembo velikosti","generalTab":"Splošno","advancedTab":"Napredno","validateNumberFailed":"Vrednost ni število.","confirmNewPage":"Vse neshranjene spremembe vsebine bodo izgubljene. Ali res želite naložiti novo stran?","confirmCancel":"Spremenili ste nekaj možnosti. 
Ali res želite zapreti okno?","options":"Možnosti","target":"Cilj","targetNew":"Novo okno (_blank)","targetTop":"Vrhovno okno (_top)","targetSelf":"Isto okno (_self)","targetParent":"Starševsko okno (_parent)","langDirLTR":"Od leve proti desni (LTR)","langDirRTL":"Od desne proti levi (RTL)","styles":"Slog","cssClasses":"Razredi slogovne predloge","width":"Širina","height":"Višina","align":"Poravnava","left":"Levo","right":"Desno","center":"Sredinsko","justify":"Obojestranska poravnava","alignLeft":"Leva poravnava","alignRight":"Desna poravnava","alignCenter":"Align Center","alignTop":"Na vrh","alignMiddle":"V sredino","alignBottom":"Na dno","alignNone":"Brez poravnave","invalidValue":"Neveljavna vrednost.","invalidHeight":"Višina mora biti število.","invalidWidth":"Širina mora biti število.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"Vrednost, določena za polje »%1«, mora biti pozitivno število z ali brez veljavne CSS-enote za merjenje (px, %, in, cm, mm, em, ex, pt ali pc).","invalidHtmlLength":"Vrednost, določena za polje »%1«, mora biti pozitivno število z ali brez veljavne HTML-enote za merjenje (px ali %).","invalidInlineStyle":"Vrednost, določena za slog v vrstici, mora biti sestavljena iz ene ali več dvojic oblike »ime : vrednost«, ločenih s podpičji.","cssLengthTooltip":"Vnesite število za vrednost v slikovnih pikah ali število z veljavno CSS-enoto (px, %, in, cm, mm, em, ex, pt ali pc).","unavailable":"%1<span class=\"cke_accessibility\">, nedosegljiv</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Keyboard shortcut","optionDefault":"Default"}}; |
|
openid_auth.ts | /*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
import * as fs from 'fs';
import wreck from '@hapi/wreck';
import {
Logger,
SessionStorageFactory,
CoreSetup,
IRouter,
ILegacyClusterClient,
KibanaRequest,
LifecycleResponseFactory,
AuthToolkit,
IKibanaResponse,
} from 'kibana/server';
import HTTP from 'http';
import HTTPS from 'https';
import { PeerCertificate } from 'tls';
import { SecurityPluginConfigType } from '../../..';
import { SecuritySessionCookie } from '../../../session/security_cookie';
import { OpenIdAuthRoutes } from './routes';
import { AuthenticationType } from '../authentication_type';
import { callTokenEndpoint } from './helper';
import { composeNextUrlQeuryParam } from '../../../utils/next_url';
export interface OpenIdAuthConfig {
authorizationEndpoint?: string;
tokenEndpoint?: string;
endSessionEndpoint?: string;
scope?: string;
authHeaderName?: string;
}
export interface WreckHttpsOptions {
ca?: string | Buffer | Array<string | Buffer>;
checkServerIdentity?: (host: string, cert: PeerCertificate) => Error | undefined;
}
export class | extends AuthenticationType {
public readonly type: string = 'openid';
private openIdAuthConfig: OpenIdAuthConfig;
private authHeaderName: string;
private openIdConnectUrl: string;
private wreckClient: typeof wreck;
constructor(
config: SecurityPluginConfigType,
sessionStorageFactory: SessionStorageFactory<SecuritySessionCookie>,
router: IRouter,
esClient: ILegacyClusterClient,
core: CoreSetup,
logger: Logger
) {
super(config, sessionStorageFactory, router, esClient, core, logger);
this.wreckClient = this.createWreckClient();
this.openIdAuthConfig = {};
this.authHeaderName = this.config.openid?.header || '';
this.openIdAuthConfig.authHeaderName = this.authHeaderName;
this.openIdConnectUrl = this.config.openid?.connect_url || '';
let scope = this.config.openid!.scope;
if (scope.indexOf('openid') < 0) {
scope = `openid ${scope}`;
}
this.openIdAuthConfig.scope = scope;
try {
this.openIdAuthConfig.authorizationEndpoint = this.config.openid.authorization_endpoint;
this.openIdAuthConfig.tokenEndpoint = this.config.openid.token_endpoint;
this.openIdAuthConfig.endSessionEndpoint = this.config.openid.end_session_endpoint;
const routes = new OpenIdAuthRoutes(
this.router,
this.config,
this.sessionStorageFactory,
this.openIdAuthConfig,
this.securityClient,
this.coreSetup,
this.wreckClient
);
routes.setupRoutes();
} catch (error) {
this.logger.error(error); // TODO: log more info
throw new Error('Failed when trying to obtain the endpoints from your IdP');
}
}
private createWreckClient(): typeof wreck {
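    // Builds the wreck HTTP client, applying the plugin's TLS settings when present:
    // a custom root CA and/or a no-op server identity check when hostname
    // verification is disabled in the config.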
const wreckHttpsOption: WreckHttpsOptions = {};
if (this.config.openid?.root_ca) {
wreckHttpsOption.ca = [fs.readFileSync(this.config.openid.root_ca)];
}
if (this.config.openid?.verify_hostnames === false) {
this.logger.debug(`openId auth 'verify_hostnames' option is off.`);
wreckHttpsOption.checkServerIdentity = (host: string, cert: PeerCertificate) => {
return undefined;
};
}
if (Object.keys(wreckHttpsOption).length > 0) {
return wreck.defaults({
agents: {
http: new HTTP.Agent(),
https: new HTTPS.Agent(wreckHttpsOption),
httpsAllowUnauthorized: new HTTPS.Agent({
rejectUnauthorized: false,
}),
},
});
} else {
return wreck;
}
}
requestIncludesAuthInfo(request: KibanaRequest): boolean {
    return !!request.headers.authorization;
}
getAdditionalAuthHeader(request: KibanaRequest): any {
return {};
}
getCookie(request: KibanaRequest, authInfo: any): SecuritySessionCookie {
return {
username: authInfo.user_name,
credentials: {
authHeaderValue: request.headers.authorization,
},
authType: this.type,
expiryTime: Date.now() + this.config.session.ttl,
};
}
// TODO: Add token expiration check here
async isValidCookie(cookie: SecuritySessionCookie): Promise<boolean> {
if (
cookie.authType !== this.type ||
!cookie.username ||
!cookie.expiryTime ||
!cookie.credentials?.authHeaderValue ||
!cookie.credentials?.expires_at
) {
return false;
}
if (cookie.credentials?.expires_at > Date.now()) {
return true;
}
// need to renew id token
if (cookie.credentials.refresh_token) {
try {
const query: any = {
grant_type: 'refresh_token',
client_id: this.config.openid?.client_id,
client_secret: this.config.openid?.client_secret,
refresh_token: cookie.credentials.refresh_token,
};
const refreshTokenResponse = await callTokenEndpoint(
this.openIdAuthConfig.tokenEndpoint!,
query,
this.wreckClient
);
        // if no id_token comes back from the refresh token call, the IdP may not allow refreshing the id_token
if (refreshTokenResponse.idToken) {
cookie.credentials = {
authHeaderValue: `Bearer ${refreshTokenResponse.idToken}`,
refresh_token: refreshTokenResponse.refreshToken,
          expires_at: Date.now() + refreshTokenResponse.expiresIn! * 1000, // expiresIn is in seconds
};
return true;
} else {
return false;
}
} catch (error) {
this.logger.error(error);
return false;
}
} else {
// no refresh token, and current token is expired
return false;
}
}
handleUnauthedRequest(
request: KibanaRequest,
response: LifecycleResponseFactory,
toolkit: AuthToolkit
): IKibanaResponse {
if (this.isPageRequest(request)) {
// nextUrl is a key value pair
const nextUrl = composeNextUrlQeuryParam(
request,
this.coreSetup.http.basePath.serverBasePath
);
return response.redirected({
headers: {
location: `${this.coreSetup.http.basePath.serverBasePath}/auth/openid/login?${nextUrl}`,
},
});
} else {
return response.unauthorized();
}
}
buildAuthHeaderFromCookie(cookie: SecuritySessionCookie): any {
const header: any = {};
const authHeaderValue = cookie.credentials?.authHeaderValue;
if (authHeaderValue) {
header.authorization = authHeaderValue;
}
return header;
}
}
| OpenIdAuthentication |
rest.py |
from flask import Blueprint, abort, current_app, jsonify, request
from sqlalchemy.exc import IntegrityError
from app.config import QueueNames
from app.dao.annual_billing_dao import set_default_free_allowance_for_service
from app.dao.dao_utils import transaction
from app.dao.fact_billing_dao import fetch_usage_year_for_organisation
from app.dao.organisation_dao import (
dao_add_service_to_organisation,
dao_add_user_to_organisation,
dao_create_organisation,
dao_get_organisation_by_email_address,
dao_get_organisation_by_id,
dao_get_organisation_services,
dao_get_organisations,
dao_get_users_for_organisation,
dao_remove_user_from_organisation,
dao_update_organisation,
)
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import dao_get_template_by_id
from app.dao.users_dao import get_user_by_id
from app.errors import InvalidRequest, register_errors
from app.models import KEY_TYPE_NORMAL, NHS_ORGANISATION_TYPES, Organisation
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.organisation.organisation_schema import (
post_create_organisation_schema,
post_link_service_to_organisation_schema,
post_update_organisation_schema,
)
from app.schema_validation import validate
organisation_blueprint = Blueprint('organisation', __name__)
register_errors(organisation_blueprint)
@organisation_blueprint.errorhandler(IntegrityError)
def handle_integrity_error(exc):
"""
Handle integrity errors caused by the unique constraint on ix_organisation_name
"""
if 'ix_organisation_name' in str(exc):
return jsonify(result="error",
message="Organisation name already exists"), 400
if 'duplicate key value violates unique constraint "domain_pkey"' in str(exc):
return jsonify(result='error',
message='Domain already exists'), 400
current_app.logger.exception(exc)
return jsonify(result='error', message="Internal server error"), 500
@organisation_blueprint.route('', methods=['GET'])
def get_organisations():
organisations = [
org.serialize_for_list() for org in dao_get_organisations()
]
return jsonify(organisations)
@organisation_blueprint.route('/<uuid:organisation_id>', methods=['GET'])
def get_organisation_by_id(organisation_id):
organisation = dao_get_organisation_by_id(organisation_id)
return jsonify(organisation.serialize())
@organisation_blueprint.route('/by-domain', methods=['GET'])
def get_organisation_by_domain():
domain = request.args.get('domain')
if not domain or '@' in domain:
abort(400)
    organisation = dao_get_organisation_by_email_address(
        'example@{}'.format(domain)
    )
if not organisation:
abort(404)
return jsonify(organisation.serialize())
@organisation_blueprint.route('', methods=['POST'])
def create_organisation():
data = request.get_json()
validate(data, post_create_organisation_schema)
if data["organisation_type"] in NHS_ORGANISATION_TYPES:
data["email_branding_id"] = current_app.config['NHS_EMAIL_BRANDING_ID']
organisation = Organisation(**data)
dao_create_organisation(organisation)
return jsonify(organisation.serialize()), 201
@organisation_blueprint.route('/<uuid:organisation_id>', methods=['POST'])
def update_organisation(organisation_id):
data = request.get_json()
validate(data, post_update_organisation_schema)
organisation = dao_get_organisation_by_id(organisation_id)
if data.get('organisation_type') in NHS_ORGANISATION_TYPES and not organisation.email_branding_id:
data["email_branding_id"] = current_app.config['NHS_EMAIL_BRANDING_ID']
result = dao_update_organisation(organisation_id, **data)
if data.get('agreement_signed') is True:
# if a platform admin has manually adjusted the organisation, don't tell people
if data.get('agreement_signed_by_id'):
send_notifications_on_mou_signed(organisation_id)
if result:
return '', 204
else:
raise InvalidRequest("Organisation not found", 404)
@organisation_blueprint.route('/<uuid:organisation_id>/service', methods=['POST'])
def link_service_to_organisation(organisation_id):
data = request.get_json()
validate(data, post_link_service_to_organisation_schema)
service = dao_fetch_service_by_id(data['service_id'])
service.organisation = None
with transaction():
dao_add_service_to_organisation(service, organisation_id)
set_default_free_allowance_for_service(service, year_start=None)
return '', 204
@organisation_blueprint.route('/<uuid:organisation_id>/services', methods=['GET'])
def get_organisation_services(organisation_id):
services = dao_get_organisation_services(organisation_id)
sorted_services = sorted(services, key=lambda s: (-s.active, s.name))
return jsonify([s.serialize_for_org_dashboard() for s in sorted_services])
@organisation_blueprint.route('/<uuid:organisation_id>/services-with-usage', methods=['GET'])
def get_organisation_services_usage(organisation_id):
|
@organisation_blueprint.route('/<uuid:organisation_id>/users/<uuid:user_id>', methods=['POST'])
def add_user_to_organisation(organisation_id, user_id):
new_org_user = dao_add_user_to_organisation(organisation_id, user_id)
return jsonify(data=new_org_user.serialize())
@organisation_blueprint.route('/<uuid:organisation_id>/users/<uuid:user_id>', methods=['DELETE'])
def remove_user_from_organisation(organisation_id, user_id):
organisation = dao_get_organisation_by_id(organisation_id)
user = get_user_by_id(user_id=user_id)
if user not in organisation.users:
error = 'User not found'
raise InvalidRequest(error, status_code=404)
dao_remove_user_from_organisation(organisation, user)
return {}, 204
@organisation_blueprint.route('/<uuid:organisation_id>/users', methods=['GET'])
def get_organisation_users(organisation_id):
org_users = dao_get_users_for_organisation(organisation_id)
return jsonify(data=[x.serialize() for x in org_users])
def check_request_args(request):
org_id = request.args.get('org_id')
name = request.args.get('name', None)
errors = []
if not org_id:
errors.append({'org_id': ["Can't be empty"]})
if not name:
errors.append({'name': ["Can't be empty"]})
if errors:
raise InvalidRequest(errors, status_code=400)
return org_id, name
def send_notifications_on_mou_signed(organisation_id):
organisation = dao_get_organisation_by_id(organisation_id)
notify_service = dao_fetch_service_by_id(current_app.config['NOTIFY_SERVICE_ID'])
def _send_notification(template_id, recipient, personalisation):
template = dao_get_template_by_id(template_id)
saved_notification = persist_notification(
template_id=template.id,
template_version=template.version,
recipient=recipient,
service=notify_service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reply_to_text=notify_service.get_default_reply_to_email_address()
)
send_notification_to_queue(saved_notification, research_mode=False, queue=QueueNames.NOTIFY)
personalisation = {
'mou_link': '{}/agreement/{}.pdf'.format(
current_app.config['ADMIN_BASE_URL'],
'crown' if organisation.crown else 'non-crown'
),
'org_name': organisation.name,
'org_dashboard_link': '{}/organisations/{}'.format(
current_app.config['ADMIN_BASE_URL'],
organisation.id
),
'signed_by_name': organisation.agreement_signed_by.name,
'on_behalf_of_name': organisation.agreement_signed_on_behalf_of_name
}
if not organisation.agreement_signed_on_behalf_of_email_address:
signer_template_id = 'MOU_SIGNER_RECEIPT_TEMPLATE_ID'
else:
signer_template_id = 'MOU_SIGNED_ON_BEHALF_SIGNER_RECEIPT_TEMPLATE_ID'
    # let the person who has been signed on behalf of know, if there is one.
    if organisation.agreement_signed_on_behalf_of_email_address:
        _send_notification(
            current_app.config['MOU_SIGNED_ON_BEHALF_ON_BEHALF_RECEIPT_TEMPLATE_ID'],
            organisation.agreement_signed_on_behalf_of_email_address,
            personalisation
        )
    # let the person who signed know - the template differs depending on whether they signed on behalf of someone
_send_notification(
current_app.config[signer_template_id],
organisation.agreement_signed_by.email_address,
personalisation
)
| try:
year = int(request.args.get('year', 'none'))
except ValueError:
return jsonify(result='error', message='No valid year provided'), 400
services = fetch_usage_year_for_organisation(organisation_id, year)
list_services = services.values()
sorted_services = sorted(list_services, key=lambda s: (-s['active'], s['service_name'].lower()))
return jsonify(services=sorted_services) |
crime_db.py | import pandas as pd
from police_api import PoliceAPI
def first_job(api, dates, t_current):
"""
Creates the tables and populates them with the historical data
from T_0
to T_current
"""
    global crime_categories, outcome_categories, streets, crimes, outcomes
    # subset of dates
dates_hist = dates[dates <= t_current]
# crime_categories table
s_crime_cat = set()
for date in dates_hist:
s_crime_cat.update(api.get_crime_categories(date))
crime_categories['id'] = [c.url for c in s_crime_cat]
crime_categories['description'] = [c.name for c in s_crime_cat]
crime_categories.set_index('id', inplace=True)
# To get the crimes for each force and neighbourhood
cr = []
    for d in dates_hist:
cr.append([api.get_crimes_area(n.boundary, date=d) for n in s_nb_flat])
    # Flatten the list
crimes_flat = [c for sublist1 in cr for sublist2 in sublist1 for c in sublist2]
# Subset for those containing a valid "persistent_id"
crimes_flat[:] = [c.__dict__ for c in crimes_flat if c.persistent_id != '']
# Convert to DataFrame
df_crimes = pd.DataFrame(crimes_flat)
df_crimes = df_crimes[['month', 'category', 'id', 'persistent_id',
'location', 'context', 'outcome_status']]
# Get the key values for the objects in each column
crimes['latitude'] = df_crimes['location'].apply(lambda x: x.latitude)
crimes['longitude'] = df_crimes['location'].apply(lambda x: x.longitude)
crimes['street'] = df_crimes['location'].apply(lambda x: x.street)
### outcome_categories table ###
# Get outcome_status to populate outcome_categories table
    outcome_status = df_crimes.pop('outcome_status')
df_outcomes = pd.DataFrame(outcome_status.apply(lambda x: x.__dict__).to_list())
df_outcomes.pop('api')
outcome_categories['id'] = df_outcomes['category'].apply(lambda x: x['id'])
outcome_categories['name'] = df_outcomes['category'].apply(lambda x: x['name'])
# Drop duplicates
outcome_categories = outcome_categories.loc[outcome_categories.name.drop_duplicates().index]
outcome_categories.set_index('id', inplace=True)
### streets table ###
# Get streets to populate streets table
s_streets = crimes['street']
streets['id'] = s_streets.apply(lambda x: x['id'])
streets['name'] = s_streets.apply(lambda x: x['name'])
# Drop duplicates
streets = streets.loc[streets.id.drop_duplicates().index]
streets.set_index('id', inplace=True)
# Clean crimes table
crimes['street'] = crimes['street'].apply(lambda x: x['id'])
# rename 'month' to 'date'
crimes.rename(columns={"month": "date"}, inplace=True)
# Ordering columns
cols = ['persistent_id', 'category', 'street', 'latitude', 'longitude', 'date', 'context']
crimes = crimes[cols]
crimes.set_index('persistent_id', inplace=True)
### outcomes table ###
crime_idx = crimes.index.to_list()
l_outcomes = [api.get_crime(idx).outcomes for idx in crime_idx]
l_outcomes_flat = [o for sublist in l_outcomes for o in sublist]
outcomes['crime'] = [o.crime.id for o in l_outcomes_flat]
outcomes['category'] = [o.category.id for o in l_outcomes_flat]
outcomes['date'] = [o.date for o in l_outcomes_flat]
    outcomes['person_id'] = [' ' for o in l_outcomes_flat]  # person_id is not provided by the API
outcomes.drop_duplicates(['crime', 'category'], inplace=True)
outcomes.set_index(['crime', 'category'], inplace=True)
def second_job(api, dates, t_last_update, t_current):
    global crime_categories, outcome_categories, streets, crimes, outcomes
    dates_upd = dates[(dates <= t_current) & (dates >= t_last_update)]
s_crime_cat = set()
for date in dates_upd:
s_crime_cat.update(api.get_crime_categories(date))
url = [c.url for c in s_crime_cat]
name = [c.name for c in s_crime_cat]
df_crime_categories = pd.DataFrame.from_dict({'id': url, 'description': name})
    df_crime_categories.set_index('id', inplace=True)
    crime_categories = crime_categories.append(df_crime_categories, ignore_index=True)
cr = []
for d in dates_upd:
cr.append([api.get_crimes_area(n.boundary, date=d) for n in s_nb_flat])
    # Flatten the list
crimes_flat = [c for sublist1 in cr for sublist2 in sublist1 for c in sublist2]
# Subset for those containing a valid "persistent_id"
crimes_flat[:] = [c.__dict__ for c in crimes_flat if c.persistent_id!='']
# Convert to DataFrame
df_crimes = pd.DataFrame(crimes_flat)
df_crimes = df_crimes[['month', 'category', 'id', 'persistent_id', 'location', 'context', 'outcome_status']]
# Get the key values for the objects in each column
df_crimes['latitude'] = df_crimes['location'].apply(lambda x: x.latitude)
df_crimes['longitude'] = df_crimes['location'].apply(lambda x: x.longitude)
df_crimes['street'] = df_crimes['location'].apply(lambda x: x.street)
### outcome_categories table ###
# Get outcome_status to populate outcome_categories table
outcome_status = df_crimes.pop('outcome_status')
df_outcomes = pd.DataFrame(outcome_status.apply(lambda x: x.__dict__).to_list())
df_outcomes.pop('api')
df_outcome_categories = pd.DataFrame({'id': [], 'description': []})
df_outcome_categories['id'] = df_outcomes['category'].apply(lambda x: x['id'])
df_outcome_categories['description'] = df_outcomes['category'].apply(lambda x: x['name'])
# Drop duplicates
    df_outcome_categories = df_outcome_categories.loc[df_outcome_categories['description'].drop_duplicates().index]
df_outcome_categories.set_index('id', inplace=True)
    outcome_categories = outcome_categories.append(df_outcome_categories, ignore_index=True)
### streets table ###
# Get streets to populate streets table
    s_streets = df_crimes['street']
df_streets = pd.DataFrame({'id': [], 'name': []})
df_streets['id'] = s_streets.apply(lambda x: x['id'])
df_streets['name'] = s_streets.apply(lambda x: x['name'])
# Drop duplicates
df_streets = df_streets.loc[df_streets.id.drop_duplicates().index]
df_streets.set_index('id', inplace=True)
    streets = streets.append(df_streets, ignore_index=True)
# Clean crimes table
df_crimes['street'] = df_crimes['street'].apply(lambda x: x['id'])
# rename 'month' to 'date'
df_crimes.rename(columns={"month": "date"}, inplace=True)
# Ordering columns
cols = ['persistent_id', 'category', 'street', 'latitude', 'longitude', 'date', 'context']
    df_crimes = df_crimes[cols]
df_crimes.set_index('persistent_id', inplace=True)
    crimes = crimes.append(df_crimes, ignore_index=True)
### outcomes table ###
crime_idx = crimes.index.to_list()
l_outcomes = [api.get_crime(idx).outcomes for idx in crime_idx]
l_outcomes_flat = [o for sublist in l_outcomes for o in sublist]
df_outcomes = pd.DataFrame({'crime': [], 'category': [], 'date': [], 'person_id': []})
df_outcomes['crime'] = [o.crime.id for o in l_outcomes_flat]
df_outcomes['category'] = [o.category.id for o in l_outcomes_flat]
df_outcomes['date'] = [o.date for o in l_outcomes_flat]
    df_outcomes['person_id'] = [' ' for o in l_outcomes_flat]  # person_id is not provided by the API
df_outcomes.drop_duplicates(['crime', 'category'], inplace=True)
df_outcomes.set_index(['crime', 'category'], inplace=True)
    outcomes = outcomes.append(df_outcomes, ignore_index=True)
def last_job(api, dates, t_current):
    global crime_categories, outcome_categories, streets, crimes, outcomes
    dates_upd = dates[dates == t_current]
s_crime_cat = set()
for date in dates_upd:
s_crime_cat.update(api.get_crime_categories(date))
url = [c.url for c in s_crime_cat]
name = [c.name for c in s_crime_cat]
df_crime_categories = pd.DataFrame.from_dict({'id': url, 'description': name})
    df_crime_categories.set_index('id', inplace=True)
    crime_categories = crime_categories.append(df_crime_categories, ignore_index=True)
cr = []
for d in dates_upd:
cr.append([api.get_crimes_area(n.boundary, date=d) for n in s_nb_flat])
    # Flatten the list
crimes_flat = [c for sublist1 in cr for sublist2 in sublist1 for c in sublist2]
# Subset for those containing a valid "persistent_id"
crimes_flat[:] = [c.__dict__ for c in crimes_flat if c.persistent_id!='']
# Convert to DataFrame
df_crimes = pd.DataFrame(crimes_flat)
df_crimes = df_crimes[['month', 'category', 'id', 'persistent_id', 'location', 'context', 'outcome_status']]
# Get the key values for the objects in each column
df_crimes['latitude'] = df_crimes['location'].apply(lambda x: x.latitude)
df_crimes['longitude'] = df_crimes['location'].apply(lambda x: x.longitude)
df_crimes['street'] = df_crimes['location'].apply(lambda x: x.street)
## outcome_categories table ##
# Get outcome_status to populate outcome_categories table
outcome_status = df_crimes.pop('outcome_status')
df_outcomes = pd.DataFrame(outcome_status.apply(lambda x: x.__dict__).to_list())
df_outcomes.pop('api')
df_outcome_categories = pd.DataFrame({'id': [], 'description': []})
df_outcome_categories['id'] = df_outcomes['category'].apply(lambda x: x['id'])
df_outcome_categories['description'] = df_outcomes['category'].apply(lambda x: x['name'])
# Drop duplicates
    df_outcome_categories = df_outcome_categories.loc[df_outcome_categories['description'].drop_duplicates().index]
df_outcome_categories.set_index('id', inplace=True)
    outcome_categories = outcome_categories.append(df_outcome_categories, ignore_index=True)
### streets table ###
# Get streets to populate streets table
    s_streets = df_crimes['street']
df_streets = pd.DataFrame({'id': [], 'name': []})
df_streets['id'] = s_streets.apply(lambda x: x['id'])
df_streets['name'] = s_streets.apply(lambda x: x['name'])
# Drop duplicates
df_streets = df_streets.loc[df_streets.id.drop_duplicates().index]
df_streets.set_index('id', inplace=True)
    streets = streets.append(df_streets, ignore_index=True)
# Clean crimes table
df_crimes['street'] = df_crimes['street'].apply(lambda x: x['id'])
# rename 'month' to 'date'
df_crimes.rename(columns={"month": "date"}, inplace=True)
# Ordering columns
cols = ['persistent_id', 'category', 'street', 'latitude', 'longitude', 'date', 'context']
    df_crimes = df_crimes[cols]
df_crimes.set_index('persistent_id', inplace=True)
    crimes = crimes.append(df_crimes, ignore_index=True)
### outcomes table ###
crime_idx = crimes.index.to_list()
l_outcomes = [api.get_crime(idx).outcomes for idx in crime_idx]
l_outcomes_flat = [o for sublist in l_outcomes for o in sublist]
df_outcomes = pd.DataFrame({'crime': [], 'category': [], 'date': [], 'person_id': []})
df_outcomes['crime'] = [o.crime.id for o in l_outcomes_flat]
df_outcomes['category'] = [o.category.id for o in l_outcomes_flat]
df_outcomes['date'] = [o.date for o in l_outcomes_flat]
    df_outcomes['person_id'] = [' ' for o in l_outcomes_flat]  # person_id is not provided by the API
df_outcomes.drop_duplicates(['crime', 'category'], inplace=True)
df_outcomes.set_index(['crime', 'category'], inplace=True)
    outcomes = outcomes.append(df_outcomes, ignore_index=True)
def main(t_current):
    global crime_categories, outcome_categories, streets, crimes, outcomes, s_nb_flat
    # Call the police API
api = Police | __ == "__main__":
main(t_current)
| API()
# Define tables
crime_categories = pd.DataFrame({'id': [], 'description': []})
outcome_categories = pd.DataFrame({'id': [], 'description': []})
streets = pd.DataFrame({'id': [], 'name': []})
crimes = pd.DataFrame({'persistent_id': [], 'category': [], 'street': [], 'city': [], 'latitude': [], 'longitude': [], 'date': [], 'context': []})
outcomes = pd.DataFrame({'crime': [], 'category': [], 'date': [], 'person_id': []})
# Transform dates into pandas Series for better manipulation
dates = pd.Series(api.get_dates())
# Get Forces
forces = api.get_forces()
# Get neighbourhoods
neighbourhoods = [f.neighbourhoods for f in forces]
nb_flat = [n for sublist in neighbourhoods for n in sublist]
s_nb_flat = pd.Series(nb_flat).unique()
first_job(api, dates, t_current)
t_last_update = api.get_latest_date()
second_job(api, dates, t_last_update, t_current)
    last_job(api, dates, t_current)
if __name |
main.rs | use std::io;
use std::cmp::Ordering;
use rand::Rng;
fn main() | {
println!("Guess the number!");
let secret_number = rand::thread_rng().gen_range(1, 101);
//println!("The secret number is: {}", secret_number);
loop {
println!("Please input your guess.");
let mut guess = String::new();
io::stdin().read_line(&mut guess)
.expect("Failed to read line");
let guess: u32 = match guess.trim().parse() {
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", guess);
match guess.cmp(&secret_number) {
Ordering::Less => println!("Too small!"),
Ordering::Greater => println!("Too big!"),
Ordering::Equal => {
println!("You win!");
break;
}
}
}
} |
|
lib.rs | //!
//! Scoped-Arena provides arena allocator with explicit scopes.
//!
//! ## Arena allocation
//!
//! Arena allocators are simple and provide ludicrously fast allocation.\
//! An allocation only bumps an internal pointer in the memory block: first to the alignment of the allocated object, then by its size, and that's it.\
//! When the memory block is exhausted the arena will allocate a new, bigger memory block.\
//! The arena can then be reset after all allocated objects are no longer used, keeping only the last memory block for reuse.\
//! After several warmup iterations the last memory block is large enough to handle all allocations until the next reset.
//!
//!
//! ### Example
//!
//! ```rust
//! use scoped_arena::Scope;
//!
//! struct Cat {
//! name: String,
//! hungry: bool,
//! }
//!
//! /// Create new arena with `Global` allocator.
//! let mut scope = Scope::new();
//!
//! /// Construct a cat and move it to the scope.
//! let cat: &mut Cat = scope.to_scope(Cat {
//! name: "Fluffy".to_owned(),
//! hungry: true,
//! });
//!
//! // Now `cat` is a mutable reference bound to scope borrow lifetime.
//!
//! assert_eq!(&cat.name, "Fluffy");
//! assert!(cat.hungry);
//!
//! cat.hungry = false;
//!
//! // This cat instance on scope will be automatically dropped when `scope` is dropped or reset.
//! // It is impossible to reset before last usage of `cat`.
//!
//! // Next line will drop cat value and free memory occupied by it.
//! scope.reset();
//!
//! // If there were more cats or any other objects put on scope they all would be dropped and memory freed.
//! ```
//!
//! ## Scopes
//!
//! To reuse memory earlier, this crate provides `Scope` with methods to create sub-`Scope`s.\
//! When a sub-`Scope` is reset or dropped it will `Drop` all stored values, free the memory it allocated, and flush the last of the newly allocated memory blocks into the parent.\
//! Objects allocated with the parent `Scope` are unchanged and still valid.
//!
//! Well placed scopes can significantly reduce memory consumption.\
//! For example, if a few function calls use a lot of dynamic memory but don't need it to be available in the caller,\
//! they can be provided with a sub-scope.\
//! At the same time any memory allocated in parent scope stays allocated.
//!
//! Creating a sub-scope is cheap and allocating within a sub-scope is as fast as allocating in the parent scope.\
//!
//! ### Example
//!
//! ```rust
//! use scoped_arena::{Scope, ScopeProxy};
//!
//!
//! fn heavy_on_memory(mut scope: Scope<'_>, foobar: &String) {
//! for _ in 0 .. 42 {
//! let foobar: &mut String = scope.to_scope(foobar.clone());
//! }
//!
//! // new `scope` is dropped here and drops all allocated strings and frees memory.
//! }
//!
//! let mut scope = Scope::new();
//!
//! // Proxy is required to be friends with borrow checker.
//! // Creating sub-scope must lock parent `Scope` from being used, which requires mutable borrow, but any allocation borrows `Scope`.
//! // `Proxy` relaxes this a bit. `Proxy` borrows `Scope` mutably and tie allocated objects lifetime to scopes' borrow lifetime.
//! // So sub-scope can borrow proxy mutably while there are objects allocated from it.
//! let mut proxy = scope.proxy();
//!
//! let foobar: &mut String = proxy.to_scope("foobar".to_owned());
//!
//! // Make sub-scope for the call.
//! heavy_on_memory(proxy.scope(), &*foobar);
//!
//! // If `heavy_on_memory` didn't trigger new memory object allocation in the scope,
//! // sub-scope drop would rewind scope's internals to exactly the same state.
//! // Otherwise last of new blocks will become current block in parent scope.
//! //
//! // Note that `foobar` is still alive.
//!
//! heavy_on_memory(proxy.scope(), &*foobar);
//! heavy_on_memory(proxy.scope(), &*foobar);
//! heavy_on_memory(proxy.scope(), &*foobar);
//! heavy_on_memory(proxy.scope(), &*foobar);
//!
//! // Once peak memory consumption is reached, any number of `heavy_on_memory` calls would not require new memory blocks to be allocated.
//! // Even `loop { heavy_on_memory(proxy.scope(), &*foobar) }` will settle on some big enough block.
//! ```
//!
//! ## Dropping
//!
//! `to_scope` and `try_to_scope` methods store drop-glue for values that `needs_drop`.
//! On reset or drop the scope iterates and properly drops all values.
//! No drop-glue is added for types that don't need drop. `Scope` allocates enough memory and writes the value there, with no bookkeeping overhead.
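//!
//! A minimal sketch of the difference, using only the `Scope` API defined below:
//!
//! ```rust
//! use scoped_arena::Scope;
//!
//! let mut scope = Scope::new();
//!
//! // `String` needs drop, so drop-glue is recorded alongside the value.
//! let s: &mut String = scope.to_scope(String::from("scoped"));
//! assert_eq!(s.as_str(), "scoped");
//!
//! // `u32` doesn't need drop, so this is just an allocation plus a write.
//! let n: &mut u32 = scope.to_scope(42);
//! assert_eq!(*n, 42);
//!
//! // Resetting drops the `String` (its destructor runs) and invalidates both references.
//! scope.reset();
//! ```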
//!
//! ## Iterator collecting
//!
//! `to_scope_from_iter` method acts as `to_scope` but works on iterators and returns slices.
//! The limitation is that `to_scope_from_iter` needs to allocate enough memory for the upper bound of what the iterator can yield.
//! If the upper bound is too large, or the iterator is unbounded, it will always fail.
//! One can use `try_to_scope_from_iter` so the failure is an `Err` and not a panic.
//! It is safe for the iterator to yield more items than the upper bound it reports; `to_scope_from_iter` will not iterate past the upper bound.
//! On success it returns a mutable reference to a slice with the items from the iterator, in order.
//! All values will be dropped on scope reset or drop, same as with `to_scope`.
//!
//! This method is especially useful to deal with APIs that require slices (*glares at FFI*); collecting into a temporary `Vec` would cost much more.
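//!
//! A minimal sketch of both paths, using only the `Scope` API defined below:
//!
//! ```rust
//! use scoped_arena::Scope;
//!
//! let scope = Scope::new();
//!
//! // Ranges report an exact upper bound, so the slice is sized precisely.
//! let squares: &mut [u32] = scope.to_scope_from_iter((1u32..=4).map(|x| x * x));
//! assert_eq!(squares, &[1, 4, 9, 16][..]);
//!
//! // An unbounded iterator reports no upper bound, so the fallible variant returns `Err`.
//! assert!(scope.try_to_scope_from_iter(0u32..).is_err());
//! ```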
//!
// #![no_std]
#![cfg(any(feature = "allocator_api", feature = "alloc"))]
#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#[cfg(feature = "alloc")]
extern crate alloc;
mod allocator_api;
mod bucket;
mod drop;
use core::{
alloc::Layout,
fmt::{self, Debug},
iter::IntoIterator,
mem::needs_drop,
ptr::{write, NonNull},
};
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
use alloc::alloc::handle_alloc_error;
use self::{
bucket::Buckets,
drop::{DropList, WithDrop},
};
use self::allocator_api::{AllocError, Allocator};
#[cfg(feature = "alloc")]
use self::allocator_api::Global;
/// Scope associated with `Scope` allocator.
/// Allows placing values on the scope returning reference bound to scope borrow.
/// On drop scope drops all values placed onto it.
/// On drop scope frees all memory allocated from it.
#[cfg(not(feature = "alloc"))]
pub struct Scope<'arena, A: Allocator> {
buckets: Buckets<'arena>,
alloc: &'arena A,
drop_list: DropList<'static>,
}
/// Scope associated with `Scope` allocator.
/// Allows placing values on the scope returning reference bound to scope borrow.
/// On drop scope drops all values placed onto it.
/// On drop scope frees all memory allocated from it.
#[cfg(feature = "alloc")]
pub struct Scope<'arena, A: Allocator = Global> {
buckets: Buckets<'arena>,
alloc: A,
drop_list: DropList<'static>,
}
impl<A> Debug for Scope<'_, A>
where
A: Allocator,
{
fn | (&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Scope")
.field("buckets", &self.buckets)
.finish_non_exhaustive()
}
}
impl<A> Drop for Scope<'_, A>
where
A: Allocator,
{
#[inline(always)]
fn drop(&mut self) {
unsafe {
self.drop_list.reset();
self.buckets.reset(&self.alloc, false);
}
}
}
#[cfg(feature = "alloc")]
impl Scope<'_, Global> {
/// Returns new instance of arena allocator based on [`Global`] allocator.
#[inline(always)]
pub fn new() -> Self {
Scope::new_in(Global)
}
/// Returns new instance of arena allocator based on [`Global`] allocator
/// with preallocated capacity in bytes.
#[inline(always)]
pub fn with_capacity(capacity: usize) -> Self {
Scope::with_capacity_in(capacity, Global)
}
}
impl<A> Scope<'_, A>
where
A: Allocator,
{
/// Returns new instance of arena allocator based on provided allocator.
#[inline(always)]
pub fn new_in(alloc: A) -> Self {
Scope::with_capacity_in(0, alloc)
}
/// Returns new instance of arena allocator based on provided allocator
/// with preallocated capacity in bytes.
#[inline(always)]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Scope {
buckets: Buckets::new(capacity, &alloc).expect(ALLOCATOR_CAPACITY_OVERFLOW),
alloc,
drop_list: DropList::new(),
}
}
}
impl<A> Scope<'_, A>
where
A: Allocator,
{
#[inline(always)]
pub fn reset(&mut self) {
unsafe {
self.drop_list.reset();
self.buckets.reset(&self.alloc, true);
}
}
/// Allocates a block of memory.
/// Returns a [`NonNull<u8>`] meeting the size and alignment guarantees of layout.
/// The returned block contents should be considered uninitialized.
///
/// Returned block will be deallocated when scope is dropped.
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn alloc(&self, layout: Layout) -> NonNull<[u8]> {
match self.try_alloc(layout) {
Ok(ptr) => ptr,
Err(_) => handle_alloc_error(layout),
}
}
/// Attempts to allocate a block of memory.
/// On success, returns a [`NonNull<u8>`] meeting the size and alignment guarantees of layout.
/// The returned block contents should be considered uninitialized.
///
/// Returned block will be deallocated when scope is dropped.
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
#[inline(always)]
pub fn try_alloc(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
unsafe { self.buckets.allocate(layout, &self.alloc) }
}
/// Move value onto the scope.
/// Returns mutable reference to value with lifetime equal to scope borrow lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn to_scope<T>(&self, value: T) -> &mut T {
self.to_scope_with(|| value)
}
/// Places value returned from function onto the scope.
/// Returns mutable reference to value with lifetime equal to scope borrow lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn to_scope_with<F, T>(&self, f: F) -> &mut T
where
F: FnOnce() -> T,
{
match self.try_to_scope_with(f) {
Ok(value) => value,
Err(_) => handle_alloc_error(Layout::new::<T>()),
}
}
/// Tries to move value onto the scope.
/// On success, returns mutable reference to value with lifetime equal to scope borrow lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
/// Returning `Err` contains original value.
#[inline(always)]
pub fn try_to_scope<T>(&self, value: T) -> Result<&mut T, (AllocError, T)> {
self.try_to_scope_with(|| value)
.map_err(|(err, f)| (err, f()))
}
    /// Tries to place the value returned from a function onto the scope.
/// On success, returns mutable reference to value with lifetime equal to scope borrow lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
/// Returning `Err` contains original value.
#[inline(always)]
pub fn try_to_scope_with<F, T>(&self, f: F) -> Result<&mut T, (AllocError, F)>
where
F: FnOnce() -> T,
{
if needs_drop::<T>() {
match self.try_alloc(Layout::new::<WithDrop<T>>()) {
Ok(ptr) => {
let ptr = ptr.cast::<WithDrop<T>>();
let value = unsafe { WithDrop::init(ptr, f(), &self.drop_list) };
Ok(value)
}
Err(err) => Err((err, f)),
}
} else {
match self.try_alloc(Layout::new::<T>()) {
Ok(ptr) => {
let ptr = ptr.cast::<T>();
unsafe { write(ptr.as_ptr(), f()) };
Ok(unsafe { &mut *ptr.as_ptr() })
}
Err(err) => Err((err, f)),
}
}
}
/// Move values from iterator onto the scope.
/// Returns mutable reference to slice with lifetime equal to scope borrow lifetime.
/// Values on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// This method allocates memory to hold iterator's upper bound number of items. See [`core::iter::Iterator::size_hint`].
/// It will not consume more items.
    /// This method will always fail for unbounded iterators.
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn to_scope_from_iter<T, I>(&self, iter: I) -> &mut [T]
where
I: IntoIterator<Item = T>,
{
use core::mem::align_of;
let too_large_layout = unsafe {
Layout::from_size_align_unchecked(usize::MAX - align_of::<T>(), align_of::<T>())
};
let iter = iter.into_iter();
let upper_bound = iter
.size_hint()
.1
.unwrap_or_else(|| handle_alloc_error(too_large_layout));
match self.try_to_scope_from_iter(iter) {
Ok(slice) => slice,
Err(_) => {
handle_alloc_error(Layout::array::<T>(upper_bound).unwrap_or(too_large_layout))
}
}
}
/// Tries to move values from iterator onto the scope.
/// On success, returns mutable reference to slice with lifetime equal to scope borrow lifetime.
/// Values on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// This method allocates memory to hold iterator's upper bound number of items. See [`core::iter::Iterator::size_hint`].
/// It will not consume more items.
    /// This method will always fail for unbounded iterators.
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
/// Returning `Err` contains original iterator.
#[inline(always)]
pub fn try_to_scope_from_iter<T, I>(
&self,
iter: I,
) -> Result<&mut [T], (AllocError, I::IntoIter)>
where
I: IntoIterator<Item = T>,
{
let iter = iter.into_iter();
let upper_bound = match iter.size_hint().1 {
Some(upper_bound) => upper_bound,
None => return Err((AllocError, iter)),
};
if needs_drop::<T>() {
match WithDrop::<T>::array_layout(upper_bound) {
Some(layout) => match self.try_alloc(layout) {
Ok(ptr) => {
let ptr = ptr.cast::<WithDrop<T>>();
let slice = unsafe { WithDrop::init_array(ptr, iter, &self.drop_list) };
Ok(slice)
}
Err(err) => Err((err, iter)),
},
None => Err((AllocError, iter)),
}
} else {
match Layout::array::<T>(upper_bound) {
Ok(layout) => match self.try_alloc(layout) {
Ok(ptr) => {
let ptr = ptr.cast::<T>();
let mut item_count = 0;
unsafe {
for item in iter.take(upper_bound) {
write(ptr.as_ptr().add(item_count), item);
item_count += 1;
}
}
let slice =
unsafe { core::slice::from_raw_parts_mut(ptr.as_ptr(), item_count) };
Ok(&mut *slice)
}
Err(err) => Err((err, iter)),
},
Err(_) => Err((AllocError, iter)),
}
}
}
/// Reports total memory allocated from underlying allocator by associated arena.
#[inline(always)]
pub fn total_memory_usage(&self) -> usize {
self.buckets.total_memory_usage()
}
/// Creates scope proxy bound to the scope.
/// Any objects allocated through proxy will be attached to the scope.
/// Returned proxy will use reference to the underlying allocator.
#[inline(always)]
pub fn proxy_ref(&mut self) -> ScopeProxy<'_, &'_ A> {
ScopeProxy {
buckets: self.buckets.fork(),
alloc: &self.alloc,
drop_list: self.drop_list.fork(),
}
}
}
impl<A> Scope<'_, A>
where
A: Allocator + Clone,
{
/// Creates scope proxy bound to the scope.
/// Any objects allocated through proxy will be attached to the scope.
/// Returned proxy will use clone of the underlying allocator.
#[inline(always)]
pub fn proxy(&mut self) -> ScopeProxy<'_, A> {
ScopeProxy {
buckets: self.buckets.fork(),
alloc: self.alloc.clone(),
drop_list: self.drop_list.fork(),
}
}
}
/// Proxy for `Scope` which allocates memory bound to the scope lifetime and not itself.
/// This allows creating sub-scopes while keeping references to scoped values.
/// Does not free memory and does not drop values moved onto the scope when dropped.
/// Parent `Scope` will do this.
#[cfg(not(feature = "alloc"))]
pub struct ScopeProxy<'scope, A: Allocator> {
buckets: Buckets<'scope>,
alloc: &'scope A,
drop_list: DropList<'scope>,
}
/// Proxy for `Scope` which allocates memory bound to the scope lifetime and not itself.
/// This allows creating sub-scopes while keeping references to scoped values.
/// Does not free memory and does not drop values moved onto the scope when dropped.
/// Parent `Scope` will do this.
#[cfg(feature = "alloc")]
pub struct ScopeProxy<'scope, A: Allocator = Global> {
buckets: Buckets<'scope>,
alloc: A,
drop_list: DropList<'scope>,
}
impl<A> Debug for ScopeProxy<'_, A>
where
A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ScopeProxy")
.field("buckets", &self.buckets)
.finish_non_exhaustive()
}
}
impl<A> Drop for ScopeProxy<'_, A>
where
A: Allocator,
{
#[inline(always)]
fn drop(&mut self) {
unsafe {
self.drop_list.flush_fork();
self.buckets.flush_fork();
}
}
}
impl<'scope, A> ScopeProxy<'scope, A>
where
A: Allocator,
{
/// Allocates a block of memory.
/// Returns a [`NonNull<u8>`] meeting the size and alignment guarantees of layout.
/// The returned block contents should be considered uninitialized.
///
/// Returned block will be deallocated when scope is dropped.
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn alloc(&self, layout: Layout) -> NonNull<[u8]> {
match self.try_alloc(layout) {
Ok(ptr) => ptr,
Err(_) => handle_alloc_error(layout),
}
}
/// Attempts to allocate a block of memory.
/// On success, returns a [`NonNull<u8>`] meeting the size and alignment guarantees of layout.
/// The returned block contents should be considered uninitialized.
///
/// Returned block will be deallocated when scope is dropped.
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
#[inline(always)]
pub fn try_alloc(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
unsafe { self.buckets.allocate(layout, &self.alloc) }
}
/// Move value onto the scope.
/// Returns mutable reference to value with lifetime equal to 'scope lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn to_scope<T>(&self, value: T) -> &'scope mut T {
self.to_scope_with(|| value)
}
/// Places value returned from function onto the scope.
/// Returns mutable reference to value with lifetime equal to scope borrow lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn to_scope_with<F, T>(&self, f: F) -> &'scope mut T
where
F: FnOnce() -> T,
{
match self.try_to_scope_with(f) {
Ok(value) => value,
Err(_) => handle_alloc_error(Layout::new::<T>()),
}
}
/// Tries to move value onto the scope.
    /// On success, returns mutable reference to value with lifetime equal to the 'scope lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
/// Returning `Err` contains original value.
#[inline(always)]
pub fn try_to_scope<T>(&self, value: T) -> Result<&'scope mut T, (AllocError, T)> {
self.try_to_scope_with(|| value)
.map_err(|(err, f)| (err, f()))
}
    /// Tries to place the value returned from a function onto the scope.
/// On success, returns mutable reference to value with lifetime equal to scope borrow lifetime.
/// Value on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
/// Returning `Err` contains original value.
#[inline(always)]
pub fn try_to_scope_with<F, T>(&self, f: F) -> Result<&'scope mut T, (AllocError, F)>
where
F: FnOnce() -> T,
{
if needs_drop::<T>() {
match self.try_alloc(Layout::new::<WithDrop<T>>()) {
Ok(ptr) => {
let ptr = ptr.cast::<WithDrop<T>>();
let value = unsafe { WithDrop::init(ptr, f(), &self.drop_list) };
Ok(value)
}
Err(err) => Err((err, f)),
}
} else {
match self.try_alloc(Layout::new::<T>()) {
Ok(ptr) => {
let ptr = ptr.cast::<T>();
unsafe { write(ptr.as_ptr(), f()) };
Ok(unsafe { &mut *ptr.as_ptr() })
}
Err(err) => Err((err, f)),
}
}
}
/// Move values from iterator onto the scope.
/// Returns mutable reference to slice with lifetime equal to 'scope lifetime.
/// Values on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// This method allocates memory to hold iterator's upper bound number of items. See [`core::iter::Iterator::size_hint`].
/// It will not consume more items.
    /// This method will always fail for unbounded iterators.
#[cfg(all(not(no_global_oom_handling), feature = "alloc"))]
#[inline(always)]
pub fn to_scope_from_iter<T, I>(&self, iter: I) -> &'scope mut [T]
where
I: IntoIterator<Item = T>,
{
use core::mem::align_of;
let too_large_layout = unsafe {
Layout::from_size_align_unchecked(usize::MAX - align_of::<T>(), align_of::<T>())
};
let iter = iter.into_iter();
let upper_bound = iter
.size_hint()
.1
.unwrap_or_else(|| handle_alloc_error(too_large_layout));
match self.try_to_scope_from_iter(iter) {
Ok(slice) => slice,
Err(_) => {
handle_alloc_error(Layout::array::<T>(upper_bound).unwrap_or(too_large_layout))
}
}
}
/// Tries to move values from iterator onto the scope.
/// On success, returns mutable reference to slice with lifetime equal to 'scope lifetime.
/// Values on scope will be dropped when scope is dropped.
///
    /// This method is as cheap as allocation if the value does not need dropping, as reported by [`core::mem::needs_drop`].
///
/// This method allocates memory to hold iterator's upper bound number of items. See [`core::iter::Iterator::size_hint`].
/// It will not consume more items.
    /// This method will always fail for unbounded iterators.
///
/// # Errors
///
/// Returning `Err` indicates that memory is exhausted.
/// Returning `Err` contains original iterator.
#[inline(always)]
pub fn try_to_scope_from_iter<T, I>(
&self,
iter: I,
) -> Result<&'scope mut [T], (AllocError, I::IntoIter)>
where
I: IntoIterator<Item = T>,
{
let iter = iter.into_iter();
let upper_bound = match iter.size_hint().1 {
Some(upper_bound) => upper_bound,
None => return Err((AllocError, iter)),
};
if needs_drop::<T>() {
match WithDrop::<T>::array_layout(upper_bound) {
Some(layout) => match self.try_alloc(layout) {
Ok(ptr) => {
let ptr = ptr.cast::<WithDrop<T>>();
let slice = unsafe { WithDrop::init_array(ptr, iter, &self.drop_list) };
Ok(slice)
}
Err(err) => Err((err, iter)),
},
None => Err((AllocError, iter)),
}
} else {
match Layout::array::<T>(upper_bound) {
Ok(layout) => match self.try_alloc(layout) {
Ok(ptr) => {
let ptr = ptr.cast::<T>();
let mut item_count = 0;
unsafe {
for item in iter.take(upper_bound) {
write(ptr.as_ptr().add(item_count), item);
item_count += 1;
}
}
let slice =
unsafe { core::slice::from_raw_parts_mut(ptr.as_ptr(), item_count) };
Ok(&mut *slice)
}
Err(err) => Err((err, iter)),
},
Err(_) => Err((AllocError, iter)),
}
}
}
/// Reports total memory allocated from underlying allocator by associated arena.
#[inline(always)]
pub fn total_memory_usage(&self) -> usize {
self.buckets.total_memory_usage()
}
/// Creates new scope which inherits from the proxy's scope.
/// This scope becomes locked until returned scope is dropped.
/// Returned scope will use reference to the underlying allocator.
#[inline(always)]
pub fn scope_ref(&mut self) -> Scope<'_, &'_ A> {
Scope {
buckets: self.buckets.fork(),
alloc: &self.alloc,
drop_list: DropList::new(),
}
}
}
impl<A> ScopeProxy<'_, A>
where
A: Allocator + Clone,
{
/// Creates new scope which inherits from the proxy's scope.
/// This scope becomes locked until returned scope is dropped.
/// Returned scope will use clone of the underlying allocator.
#[inline(always)]
pub fn scope(&mut self) -> Scope<'_, A> {
Scope {
buckets: self.buckets.fork(),
alloc: self.alloc.clone(),
drop_list: DropList::new(),
}
}
}
const ALLOCATOR_CAPACITY_OVERFLOW: &'static str = "Allocator capacity overflow";
unsafe impl<A> Allocator for &'_ Scope<'_, A>
where
A: Allocator,
{
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.try_alloc(layout)
}
#[inline(always)]
unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
// Will be deallocated on scope drop.
}
#[cfg(feature = "allocator_api")]
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
// Returns same memory unchanged.
// This is valid behavior as change in layout won't affect deallocation
// and for `grow{_zeroed}` methods new layout with smaller size will only affect numbers of bytes copied.
Ok(NonNull::new_unchecked(core::slice::from_raw_parts_mut(
ptr.as_ptr(),
old_layout.size(),
)))
}
}
unsafe impl<A> Allocator for ScopeProxy<'_, A>
where
A: Allocator,
{
#[inline(always)]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.try_alloc(layout)
}
#[inline(always)]
unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {
// Will be deallocated on scope drop.
}
#[cfg(feature = "allocator_api")]
#[inline(always)]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
debug_assert!(
new_layout.size() <= old_layout.size(),
"`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
);
// Returns same memory unchanged.
// This is valid behavior as change in layout won't affect deallocation
// and for `grow{_zeroed}` methods new layout with smaller size will only affect numbers of bytes copied.
Ok(NonNull::new_unchecked(core::slice::from_raw_parts_mut(
ptr.as_ptr(),
old_layout.size(),
)))
}
}
| fmt |
tty.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use std::io::IoError;
use std::ptr;
use std::rt::rtio::RtioTTY;
use homing::{HomingIO, HomeHandle};
use stream::StreamWatcher;
use super::{UvError, UvHandle, uv_error_to_io_error};
use uvio::UvIoFactory;
use uvll;
pub struct TtyWatcher{
tty: *uvll::uv_tty_t,
stream: StreamWatcher,
home: HomeHandle,
fd: libc::c_int,
}
impl TtyWatcher {
pub fn new(io: &mut UvIoFactory, fd: libc::c_int, readable: bool)
-> Result<TtyWatcher, UvError>
{
// libuv may succeed in giving us a handle (via uv_tty_init), but if the
// handle isn't actually connected to a terminal there are frequently
// many problems in using it with libuv. To get around this, always
// return a failure if the specified file descriptor isn't actually a
// TTY.
//
// Related:
// - https://github.com/joyent/libuv/issues/982
// - https://github.com/joyent/libuv/issues/988
let guess = unsafe { uvll::guess_handle(fd) };
if guess != uvll::UV_TTY as libc::c_int {
return Err(UvError(uvll::EBADF));
}
// libuv was recently changed to not close the stdio file descriptors,
// but it did not change the behavior for windows. Until this issue is
// fixed, we need to dup the stdio file descriptors because otherwise
// uv_close will close them
let fd = if cfg!(windows) && fd <= libc::STDERR_FILENO {
unsafe { libc::dup(fd) }
} else | ;
// If this file descriptor is indeed guessed to be a tty, then go ahead
// with attempting to open it as a tty.
let handle = UvHandle::alloc(None::<TtyWatcher>, uvll::UV_TTY);
let mut watcher = TtyWatcher {
tty: handle,
stream: StreamWatcher::new(handle),
home: io.make_handle(),
fd: fd,
};
match unsafe {
uvll::uv_tty_init(io.uv_loop(), handle, fd as libc::c_int,
readable as libc::c_int)
} {
0 => Ok(watcher),
n => {
// On windows, libuv returns errors before initializing the
// handle, so our only cleanup is to free the handle itself
if cfg!(windows) {
unsafe { uvll::free_handle(handle); }
watcher.tty = ptr::null();
}
Err(UvError(n))
}
}
}
}
impl RtioTTY for TtyWatcher {
fn read(&mut self, buf: &mut [u8]) -> Result<uint, IoError> {
let _m = self.fire_homing_missile();
self.stream.read(buf).map_err(uv_error_to_io_error)
}
fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
let _m = self.fire_homing_missile();
self.stream.write(buf, false).map_err(uv_error_to_io_error)
}
fn set_raw(&mut self, raw: bool) -> Result<(), IoError> {
let raw = raw as libc::c_int;
let _m = self.fire_homing_missile();
match unsafe { uvll::uv_tty_set_mode(self.tty, raw) } {
0 => Ok(()),
n => Err(uv_error_to_io_error(UvError(n)))
}
}
#[allow(unused_mut)]
fn get_winsize(&mut self) -> Result<(int, int), IoError> {
let mut width: libc::c_int = 0;
let mut height: libc::c_int = 0;
let widthptr: *libc::c_int = &width;
let heightptr: *libc::c_int = &height;
let _m = self.fire_homing_missile();
match unsafe { uvll::uv_tty_get_winsize(self.tty,
widthptr, heightptr) } {
0 => Ok((width as int, height as int)),
n => Err(uv_error_to_io_error(UvError(n)))
}
}
fn isatty(&self) -> bool {
unsafe { uvll::guess_handle(self.fd) == uvll::UV_TTY as libc::c_int }
}
}
impl UvHandle<uvll::uv_tty_t> for TtyWatcher {
fn uv_handle(&self) -> *uvll::uv_tty_t { self.tty }
}
impl HomingIO for TtyWatcher {
fn home<'a>(&'a mut self) -> &'a mut HomeHandle { &mut self.home }
}
impl Drop for TtyWatcher {
fn drop(&mut self) {
if !self.tty.is_null() {
let _m = self.fire_homing_missile();
self.close_async_();
}
}
}
| { fd } |
media.js | // upload picture
/*$('#med_pic').on('input',function () {
let fd = new FormData(),
file = $(this)[0].files,
_self =$(this)[0],
fol = $('#fldr').val();
fd.append('file',file[0]);
fd.append('folder',fol);
ajaxCsrf();
$.ajax({
url: 'media/upload',
type: 'post',
data: fd,
contentType: false,
processData: false,
success: function(response){
// console.log(response);
if(response.status == 'success'){
let div = '<div class="img_box" data-id="'+response.id +'">' +
'<span aria-hidden="true" class="rem">' +
'<i class="fa fa-close img_del"></i></span>' +
'<p class="vert">' + readableBytes(response.size) +'</p>'+
'<img class="blah" src="'+response.path +'" />' +
'<a href="javascript:void(0) " style="margin-top: 1px" data-path="' + response.public_url + '">copy public path</a>' +
'</div>';
if(fol == '0'){
$('.img_bl').append(div);
} else{
$('.f_box[data-id='+fol+']').find('.count').text(response.cnt);
$('.f_data').append(div);
}
flashMessage(response.message)
// alert("ok");
// readURL(_self)
}else{
// $.each(response.error.file,function (i,v) {
// console.log(v);
flashMessage(response.error.file,'red')
// })
}
},
error: function (xhr) {
console.log((xhr));
}
});
});*/
// function readURL(input) {
// if (input.files && input.files[0]) {
// let reader = new FileReader(),
// img_bl = null;
// reader.onload = function(e) {
// img_bl = $("<div class='img_box'><img class='blah' src="+e.target.result+" /></div>")
// console.log(img_bl);
// $('.img_bl').append(img_bl[0])
// }
// reader.readAsDataURL(input.files[0]); // convert to base64 string
// }
// }
$(document).ready(function () {
$('#fldr').val(0);
})
let $modal = $('#modal');
let image = document.getElementById('image');
let cropper;
let send_data= [];
$(document).on('click', '.blah', function (e) {
if(!$(this).hasClass('blank')){
let url = $(this).attr('src');
$modal.modal('show');
image.src = url;
let fname = url.replace(/^.*[\\\/]/, ''),
id = $(this).parent().data('id');
fetch(url)
.then(function (response) {
return response.blob()
})
.then(function (blob) {
// send('k', blob,fname,id)
send_data.push('k')
send_data.push(blob)
send_data.push(fname)
send_data.push(id)
});
}
});
$("body").on("change", ".image", function(e){
let files = e.target.files;
const file11 = this.files[0];
const fileType = file11['type'];
const validImageTypes = ['image/gif', 'image/jpeg', 'image/png'];
// let done = function (url) {
// image.src = url;
// $modal.modal('show');
// // send()
// };
// let reader;
// let file;
// let url;
// if (files && files.length > 0 && validImageTypes.includes(fileType)) {
// file = files[0];
// if (URL) {
// done(URL.createObjectURL(file));
// } else if (FileReader) {
// reader = new FileReader();
// reader.onload = function (e) {
// done(reader.result);
// };
// reader.readAsDataURL(file);
// }
// }
// else {
let fd = new FormData($(this).parents('form')[0]),
fol = $('#fldr').val();
fd.append('folder',fol);
// fd.append('file',file);
// fd.append('folder',fol);
$.ajaxSetup({
headers: {
'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content')
}
});
$.ajax({
url: 'media/file_upload',
type: 'post',
dataType: "json",
data: function(){
let data = new FormData();
jQuery.each(jQuery('#med_pic')[0].files, function(i, file) {
data.append('file[]', file);
});
data.append('fol', fol);
return data;
}(),
contentType: false,
processData: false,
success: function (response) {
$.each(response,function (i,val) {
let path = val.type == 'image' ? val.path : '/images/doc.png' ;
let ext = val.type === 'image' ? '': '.'+ val.type;
let add_class = val.type === 'image' ? 'blah': 'blah blank';
let fileNameIndex = val.path.lastIndexOf("/") + 1;
let description = val.type === 'image' ? val.path.substr(fileNameIndex).slice(0,20) +' / '+ readableBytes(val.size) +' '+ ext : val.path.substr(fileNameIndex).slice(0,20);
let div = '<div class="img_box" data-id="'+val.id +'">' +
'<span aria-hidden="true" class="rem">' +
'<i class="fa fa-close img_del"></i></span>' +
'<p class="vert">' + description+'</p>'+
'<img class="'+ add_class+'" src="'+ path+'" />' +
'<a href="javascript:void(0) " style="margin-top: 1px" data-path="' + val.public_url + '">copy public path</a>' +
'</div>';
if(fol == '0'){
$('.img_bl').append(div);
} else{
$('.f_box[data-id='+fol+']').find('.count').text(val.cnt);
$('.f_data').append(div);
}
})
}
})
// flashMessage('unsupported media type !','red');
// }
});
$modal.on('shown.bs.modal', function () {
cropper = new Cropper(image, {
// aspectRatio: 1,
viewMode:3,
preview: '.preview',
crop(event) {
cropImage(
event.detail.x,
event.detail.y,
event.detail.width,
event.detail.height,
event.detail.scaleX,
event.detail.scaleY);
$('.w_value').html(Math.round(event.detail.width) + 'px');
$('.h_value').html(Math.round(event.detail.height) + 'px');
},
});
// console.log(x);
}).on('hidden.bs.modal', function () {
cropper.destroy();
cropper = null;
send_data = [];
});
let arr = {};
function cropImage(x,y,w,h,X,Y){
arr = {
'x':x,
'y':y,
'w':w,
'h':h,
'X':X,
'Y':Y,
};
return arr;
}
$("#crop").on('click',function(){
let fd = new FormData(),
// _self =$('#med_pic')[0],
fol = $('#fldr').val(),
file;
if(send_data.length > 1 && send_data[0] == 'k'){
fd.append('param',send_data[0]);
fd.append('file',send_data[1],send_data[2]);
} else{
file = $('#med_pic')[0].files[0];
fd.append('file',file);
}
fd.append('folder',fol);
fd.append('prop',JSON.stringify(arr));
$.ajaxSetup({
headers: {
'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content')
}
});
$.ajax({
url: 'media/upload',
type: 'post',
// dataType: "json",
data: fd,
contentType: false,
processData: false,
success: function(response){
// console.log(response);
if(response.status == 'success' && response.edited !== true){
let div = '<div class="img_box" data-id="'+response.id +'">' +
'<span aria-hidden="true" class="rem">' +
'<i class="fa fa-close img_del"></i></span>' +
'<p class="vert">' + readableBytes(response.size) +'</p>'+
'<img class="blah" src="'+response.path +'" />' +
'<a href="javascript:void(0) " style="margin-top: 1px" data-path="' + response.public_url + '">copy public path</a>' +
'</div>';
if(fol == '0'){
$('.img_bl').append(div);
} else{
$('.f_box[data-id='+fol+']').find('.count').text(response.cnt);
$('.f_data').append(div);
}
$('.modal').modal('hide');
flashMessage(response.message)
fol = 0;
}else if(response.edited == true){
let d = new Date();
$(".img_box[data-id='" + send_data[3] + "']").find('img').attr('src', response.path+'?'+d.getTime());
$(".img_box[data-id='" + send_data[3] + "']").find('.vert').text(readableBytes(response.size));
$('.modal').modal('hide');
flashMessage(response.message);
send_data = [];
} else{
flashMessage(response.error.file,'red')
}
},
error: function (xhr) {
console.log((xhr));
}
});
})
// }
// add folder modal
$('.toolbar_m').click(function () {
$('#folder_modal').modal('show');
});
$('#folder_modal #save_f').click(function () {
let name = $('#f_name').val(),
url = $('#folder_modal form').attr('action');
ajaxCsrf();
$.ajax({
url: url,
type: 'post',
data: {'name': name},
dataType: 'json',
success: function(response){
if(response.status == 'success'){
$('#folder_modal').modal('hide');
let fl = '<div class="f_box" data-id='+response.id+'>\n' +
' <div class="folder_box" >\n' +
'<span class="count"></span>\n' +
' <i class="fa fa-folder" aria-hidden="true" ></i><span>'+ response.name +'</span>\n' +
' </div><div class="rem_fol">delete folder</div>\n' +
' </div>';
$('.folder_bl').append(fl);
window.location.reload()
}else{
alert('folder not created');
}
},
});
});
// delete image
$(document).on('click','.img_del',function () {
let id = $(this).closest('.img_box').data('id'),
fol = $(this).closest('.img_box').data('f');
ajaxCsrf();
$.ajax({
url: 'media/delete_file',
type: 'post',
data: {'id': id,'folder':fol},
dataType: 'json',
success: function(response){
if(response.status == 'success'){
$('.img_box[data-id='+response.id+']').remove();
$('.f_box[data-id='+fol+']').find('.count').text(response.cnt);
flashMessage(response.message)
}else{
flashMessage(response.message,'red')
}
},
});
});
// enter folder
$(document).on('dblclick','.f_box',function (e) {
if(e.target.className != 'rem_fol'){
$('.f_level .f_data').html('')
let id = $(this).data('id');
$('#fldr').val(id);
ajaxCsrf();
$.ajax({
url: 'media/open_folder',
type: 'post',
data: {'id': id},
dataType: 'json',
success: function(response){
if(response.status == 'success') {
$.each(response.images, function (i, v) {
let path = v.image.type === 'image' ? v.path : '/images/doc.png' ,
ext = v.image.type === 'image' ? '': v.image.type;
let add_class = v.image.type === 'image' ? 'blah': 'blah blank';
let fileNameIndex = v.image.path.lastIndexOf("/") + 1;
let description = v.image.type === 'image' ? v.image.path.substr(fileNameIndex).slice(0,10) +' / '+ readableBytes(v.size) +' '+ ext : v.image.path.substr(fileNameIndex).slice(0,20);
if(v.image) {
$('.f_level .f_data').append(
'<div class="img_box" data-id="' + v.image.id + '" data-f="' + id + '">\n' +
' <span aria-hidden="true" class="rem"><i class="fa fa-close img_del"></i></span>\n' +
'<p class="vert">'+ description +'</p>'+
' <img class="'+add_class+'" src="' + path + '">\n' +
'<a href="javascript:void(0) " style="margin-top: 1px" data-path="' + v.path + '">copy public path</a>' +
'</div>')
}
});
$('.r_level').hide();
// $('.toolbar_m .fa-folder').css({'color':'red'})
// $('.toolbar_m span').css({'color':'red'})
$('.toolbar_m').hide();
$('.f_level').show();
$('#r_menu').after('<span>->' + response.f_name + '</span>')
} else {
alert('error opening folder');
}
},
});
}
});
$('#r_menu').click(function () {
$('#fldr').val('0');
$('.f_level').hide();
$('.r_level').show();
$('#r_menu').siblings('span').remove();
$('.toolbar_m').show();
});
$(document).on('mouseenter','.f_box',function (e) {
$(this).find('.rem_fol').slideDown('fast')
});
$(document).on('mouseleave','.f_box',function (e) {
$(this).find('.rem_fol').slideUp('fast')
});
// delete folder
$('.rem_fol').click(function () {
let id = $(this).parent('.f_box').data('id');
ajaxCsrf();
$.ajax({
url: 'media/delete_folder',
type: 'post',
data: {'id': id},
dataType: 'json',
success: function(response){
if(response.status == 'success'){
$('.f_box[data-id='+response.id+']').remove();
flashMessage(response.message)
}else{
flashMessage(response.message,'red')
}
},
});
});
// copy public path
$(document).on('click','.img_box a',function () {
let value = $(this).data('path');
copyToClipboard(value,this);
});
function copyToClipboard(value,_this) {
$(_this).text('copied').css('background-color', '#0a6aa1');
let $tmpInput = $('<input>');
$tmpInput.val(value);
$('body').append($tmpInput);
$tmpInput.select();
document.execCommand('copy');
$tmpInput.remove();
setTimeout(function () {
$(_this).text('copy public path').css('background-color', 'darkgrey');
},2000)
}
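// Note: the temporary <input> + document.execCommand('copy') approach above works
// in older browsers; on secure (https) origins the async Clipboard API is a simpler
// alternative, e.g. (sketch only):
//   navigator.clipboard.writeText(value).then(function () { flashMessage('copied'); });
// flashMessage is the helper already used elsewhere in this file.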
//
$(document).on('click','.img_box a',function () {
let value = $(this).data('path');
copyToClipboard(value,this);
});
function | (value,_this) {
$(_this).text('copied').css('background-color', '#0a6aa1');
let $tmpInput = $('<input>');
$tmpInput.val(value);
$('body').append($tmpInput);
$tmpInput.select();
document.execCommand('copy');
$tmpInput.remove();
setTimeout(function () {
$(_this).text('copy public path').css('background-color', 'darkgrey');
},2000)
}
function ajaxCsrf() {
return $.ajaxSetup({
headers: {
'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content')
}
});
}
function readableBytes(bytes) {
if (!bytes) { return '0 B'; } // guard: Math.log(0) is -Infinity, which would index sizes[] with an invalid value
let i = Math.floor(Math.log(bytes) / Math.log(1024)),
sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
return (bytes / Math.pow(1024, i)).toFixed(2) * 1 + ' ' + sizes[i];
}
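// Usage sketch: readableBytes(1536) -> '1.5 KB'; the `* 1` trims trailing zeros
// from toFixed(2), and the guard above handles zero-byte files.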
| copyToClipboard |
get-root.ts | import {prepareApi} from "../../common";
import * as supertest from "supertest";
import {ApiCodes, Bcrypt, Const, Employee, Web} from "../../../src";
describe("GET /stuffs", () => {
prepareApi();
let token: string;
beforeEach(async () => {
await Employee.create({
email: "[email protected]",
language: "ru",
moderator: true,
name: "admin",
password: await Bcrypt.hash("super mario"),
phone: "+79123456789",
tfa: false,
});
token = (await supertest(Web.instance.app)
.post(`${Const.API_MOUNT_POINT}/signin`)
.send({
email: "[email protected]",
password: "super mario",
})
.expect(200)).body.token;
});
it("Success 200", async () => {
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.set("Authorization", `Bearer ${token}`)
.expect(200, {
data: [],
});
await supertest(Web.instance.app)
.post(`${Const.API_MOUNT_POINT}/stuffs`)
.set("Authorization", `Bearer ${token}`)
.send({
amountType: "kg",
})
.expect(201, {
id: "1",
});
await supertest(Web.instance.app)
.post(`${Const.API_MOUNT_POINT}/stuffs`)
.set("Authorization", `Bearer ${token}`)
.send({
amountType: "piece",
})
.expect(201, {
id: "2",
});
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.set("Authorization", `Bearer ${token}`)
.expect(200, {
data: [
{
amountType: "kg",
enabled: true,
id: "1",
},
{
amountType: "piece",
enabled: true,
id: "2",
},
],
});
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.query({
limit: "1",
})
.set("Authorization", `Bearer ${token}`)
.expect(200, {
data: [
{
amountType: "kg",
enabled: true,
id: "1",
},
],
});
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.query({
offset: "1",
})
.set("Authorization", `Bearer ${token}`)
.expect(200, {
data: [
{
amountType: "piece",
enabled: true,
id: "2",
},
],
});
});
it("Bad Request 400", async () => {
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.query({
limit: "aaa",
})
.set("Authorization", `Bearer ${token}`)
.expect(400, {
code: ApiCodes.BAD_REQUEST,
message: `child "limit" fails because ["limit" must be an integer]`,
});
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.query({
offset: "aaa",
})
.set("Authorization", `Bearer ${token}`)
.expect(400, {
code: ApiCodes.BAD_REQUEST,
message: `child "offset" fails because ["offset" must be an integer]`,
});
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.query({
extraField: true,
})
.set("Authorization", `Bearer ${token}`)
.expect(400, {
code: ApiCodes.BAD_REQUEST,
message: `"extraField" is not allowed`,
});
});
| await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.expect(401, {
code: ApiCodes.JWT_VERIFY_USER,
message: "jwt must be provided",
});
await supertest(Web.instance.app)
.get(`${Const.API_MOUNT_POINT}/stuffs`)
.set("Authorization", `Bearer token`)
.expect(401, {
code: ApiCodes.JWT_VERIFY_USER,
message: "jwt malformed",
});
});
}); | it("Unauthorized 401", async () => { |
__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wildcard-import, arguments-differ
r"""Module for pre-defined NLP models.
This module contains definitions for the following model architectures:
- `AWD`_
You can construct a model with random weights by calling its constructor. Because NLP models
are tied to vocabularies, you can either specify a dataset name to load and use the vocabulary
of that dataset:
.. code-block:: python
import gluonnlp as nlp
awd, vocab = nlp.model.awd_lstm_lm_1150(dataset_name='wikitext-2')
or directly specify a vocabulary object:
.. code-block:: python
awd, vocab = nlp.model.awd_lstm_lm_1150(None, vocab=custom_vocab)
We provide pre-trained models for all the listed models.
These models can constructed by passing ``pretrained=True``:
.. code-block:: python
awd, vocab = nlp.model.awd_lstm_lm_1150(dataset_name='wikitext-2',
pretrained=True)
.. _AWD: https://arxiv.org/abs/1708.02182
- `ELMo`_
You can construct a predefined ELMo model structure:
.. code-block:: python
import gluonnlp as nlp
elmo = nlp.model.elmo_2x1024_128_2048cnn_1xhighway(dataset_name='gbw')
You can also get a ELMo model with pretrained parameters:
.. code-block:: python
import gluonnlp as nlp
elmo = nlp.model.elmo_2x1024_128_2048cnn_1xhighway(dataset_name='gbw', pretrained=True)
.. _ELMo: https://arxiv.org/pdf/1802.05365.pdf
"""
from . import (attention_cell, bert, bilm_encoder, block,
convolutional_encoder, elmo, highway, language_model,
lstmpcellwithclip, parameter, sampled_block,
seq2seq_encoder_decoder, sequence_sampler, train, transformer,
utils)
from .attention_cell import *
from .bert import *
from .bilm_encoder import BiLMEncoder
from .block import *
from .convolutional_encoder import *
from .elmo import *
from .highway import *
from .language_model import *
from .lstmpcellwithclip import LSTMPCellWithClip
from .parameter import *
from .sampled_block import *
from .seq2seq_encoder_decoder import *
from .sequence_sampler import *
from .transformer import *
from .translation import *
from .utils import *
__all__ = language_model.__all__ + sequence_sampler.__all__ + attention_cell.__all__ + \
utils.__all__ + parameter.__all__ + block.__all__ + highway.__all__ + \
convolutional_encoder.__all__ + sampled_block.__all__ + ['get_model'] + ['train'] + \
bilm_encoder.__all__ + lstmpcellwithclip.__all__ + elmo.__all__ + \
seq2seq_encoder_decoder.__all__ + transformer.__all__ + bert.__all__
def get_model(name, **kwargs):
| """Returns a pre-defined model by name.
Parameters
----------
name : str
Name of the model.
dataset_name : str or None, default None
The dataset name on which the pre-trained model is trained.
For language model, options are 'wikitext-2'.
For ELMo, options are 'gbw' and '5bw'.
'gbw' represents 1 Billion Word Language Model Benchmark
http://www.statmt.org/lm-benchmark/;
'5bw' represents a dataset of 5.5B tokens consisting of
Wikipedia (1.9B) and all of the monolingual news crawl data from WMT 2008-2012 (3.6B).
If specified, then the returned vocabulary is extracted from
the training set of the dataset.
If None, then vocab is required, for specifying embedding weight size, and is directly
returned.
vocab : gluonnlp.Vocab or None, default None
Vocabulary object to be used with the language model.
Required when dataset_name is not specified.
None Vocabulary object is required with the ELMo model.
pretrained : bool, default False
Whether to load the pre-trained weights for model.
ctx : Context, default CPU
The context in which to load the pre-trained weights.
root : str, default '$MXNET_HOME/models' with MXNET_HOME defaults to '~/.mxnet'
Location for keeping the model parameters.
Returns
-------
gluon.Block, gluonnlp.Vocab, (optional) gluonnlp.Vocab
"""
models = {'standard_lstm_lm_200' : standard_lstm_lm_200,
'standard_lstm_lm_650' : standard_lstm_lm_650,
'standard_lstm_lm_1500': standard_lstm_lm_1500,
'awd_lstm_lm_1150': awd_lstm_lm_1150,
'awd_lstm_lm_600': awd_lstm_lm_600,
'big_rnn_lm_2048_512': big_rnn_lm_2048_512,
'elmo_2x1024_128_2048cnn_1xhighway': elmo_2x1024_128_2048cnn_1xhighway,
'elmo_2x2048_256_2048cnn_1xhighway': elmo_2x2048_256_2048cnn_1xhighway,
'elmo_2x4096_512_2048cnn_2xhighway': elmo_2x4096_512_2048cnn_2xhighway,
'transformer_en_de_512': transformer_en_de_512,
'bert_12_768_12' : bert_12_768_12,
'bert_24_1024_16' : bert_24_1024_16,
'roberta_12_768_12' : roberta_12_768_12,
'roberta_24_1024_16' : roberta_24_1024_16,
'ernie_12_768_12' : ernie_12_768_12}
name = name.lower()
if name not in models:
raise ValueError(
'Model %s is not supported. Available options are\n\t%s'%(
name, '\n\t'.join(sorted(models.keys()))))
return models[name](**kwargs) |
|
help.rs | use std::fmt;
use options::flags;
use options::parser::MatchedFlags;
use fs::feature::xattr;
static OPTIONS: &str = r##"
-?, --help show list of command-line options
-v, --version show version of exa
DISPLAY OPTIONS
-1, --oneline display one entry per line
-l, --long display extended file metadata as a table
-G, --grid display entries as a grid (default)
-x, --across sort the grid across, rather than downwards
-R, --recurse recurse into directories
-T, --tree recurse into directories as a tree
-F, --classify display type indicator by file names
--colo[u]r=WHEN when to use terminal colours (always, auto, never)
--colo[u]r-scale highlight levels of file sizes distinctly
FILTERING AND SORTING OPTIONS
-a, --all show hidden and 'dot' files
-d, --list-dirs list directories like regular files
-D, --dirs-only list only directories
-r, --reverse reverse the sort order
-s, --sort SORT_FIELD which field to sort by
--group-directories-first list directories before other files
-I, --ignore-glob GLOBS glob patterns (pipe-separated) of files to ignore
--git-ignore Ignore files mentioned in '.gitignore'
Valid sort fields: name, Name, extension, Extension, size, type,
modified, accessed, created, inode, and none.
date, time, old, and new all refer to modified.
"##;
static LONG_OPTIONS: &str = r##"
LONG VIEW OPTIONS
-b, --binary list file sizes with binary prefixes | -h, --header add a header row to each column
-H, --links list each file's number of hard links
-i, --inode list each file's inode number
-L, --level DEPTH limit the depth of recursion
-m, --modified use the modified timestamp field
-S, --blocks show number of file system blocks
-t, --time FIELD which timestamp field to list (modified, accessed, created)
-u, --accessed use the accessed timestamp field
-U, --created use the created timestamp field
--time-style how to format timestamps (default, iso, long-iso, full-iso)"##;
static GIT_HELP: &str = r##" --git list each file's Git status, if tracked"##;
static EXTENDED_HELP: &str = r##" -@, --extended list each file's extended attributes and sizes"##;
/// All the information needed to display the help text, which depends
/// on which features are enabled and whether the user only wants to
/// see one section’s help.
#[derive(PartialEq, Debug)]
pub struct HelpString {
/// Only show the help for the long section, not all the help.
only_long: bool,
/// Whether the --git option should be included in the help.
git: bool,
/// Whether the --extended option should be included in the help.
xattrs: bool,
}
impl HelpString {
/// Determines how to show help, if at all, based on the user’s
/// command-line arguments. This one works backwards from the other
/// ‘deduce’ functions, returning Err if help needs to be shown.
///
/// We don’t do any strict-mode error checking here: it’s OK to give
/// the --help or --long flags more than once. Actually checking for
/// errors when the user wants help is kind of petty!
pub fn deduce(matches: &MatchedFlags) -> Result<(), HelpString> {
if matches.count(&flags::HELP) > 0 {
let only_long = matches.count(&flags::LONG) > 0;
let git = cfg!(feature="git");
let xattrs = xattr::ENABLED;
Err(HelpString { only_long, git, xattrs })
}
else {
Ok(()) // no help needs to be shown
}
}
}
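// Caller-side sketch (assumed shape, not shown in this file): `deduce` is typically
// invoked as `HelpString::deduce(&matches)?`, so the Err(HelpString) propagates
// upward, is Display-formatted, and the program exits successfully; "user wants
// help" is deliberately modelled as the error branch.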
impl fmt::Display for HelpString {
/// Format this help options into an actual string of help
/// text to be displayed to the user.
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
writeln!(f, "Usage:\n exa [options] [files...]")?;
if !self.only_long {
write!(f, "{}", OPTIONS)?;
}
write!(f, "{}", LONG_OPTIONS)?;
if self.git {
write!(f, "\n{}", GIT_HELP)?;
}
if self.xattrs {
write!(f, "\n{}", EXTENDED_HELP)?;
}
Ok(())
}
}
#[cfg(test)]
mod test {
use options::Options;
use std::ffi::OsString;
fn os(input: &'static str) -> OsString {
let mut os = OsString::new();
os.push(input);
os
}
#[test]
fn help() {
let args = [ os("--help") ];
let opts = Options::parse(&args, &None);
assert!(opts.is_err())
}
#[test]
fn help_with_file() {
let args = [ os("--help"), os("me") ];
let opts = Options::parse(&args, &None);
assert!(opts.is_err())
}
#[test]
fn unhelpful() {
let args = [];
let opts = Options::parse(&args, &None);
assert!(opts.is_ok()) // no help when --help isn’t passed
}
} | -B, --bytes list file sizes in bytes, without any prefixes
-g, --group list each file's group |
routing.go | /*
* Copyright 2018 The Trickster Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package routing is the Trickster Request Router
package routing
import (
"fmt"
"net/http"
"net/http/pprof"
"sort"
"strings"
"github.com/trickstercache/trickster/v2/cmd/trickster/config"
"github.com/trickstercache/trickster/v2/pkg/backends"
"github.com/trickstercache/trickster/v2/pkg/backends/alb"
"github.com/trickstercache/trickster/v2/pkg/backends/healthcheck"
bo "github.com/trickstercache/trickster/v2/pkg/backends/options"
"github.com/trickstercache/trickster/v2/pkg/backends/providers"
"github.com/trickstercache/trickster/v2/pkg/backends/providers/registration"
"github.com/trickstercache/trickster/v2/pkg/backends/reverseproxycache"
"github.com/trickstercache/trickster/v2/pkg/backends/rule"
"github.com/trickstercache/trickster/v2/pkg/cache"
encoding "github.com/trickstercache/trickster/v2/pkg/encoding/handler"
tl "github.com/trickstercache/trickster/v2/pkg/observability/logging"
"github.com/trickstercache/trickster/v2/pkg/observability/tracing"
"github.com/trickstercache/trickster/v2/pkg/proxy/handlers/health"
"github.com/trickstercache/trickster/v2/pkg/proxy/methods"
"github.com/trickstercache/trickster/v2/pkg/proxy/paths/matching"
po "github.com/trickstercache/trickster/v2/pkg/proxy/paths/options"
"github.com/trickstercache/trickster/v2/pkg/proxy/request/rewriter"
"github.com/trickstercache/trickster/v2/pkg/util/middleware"
"github.com/gorilla/mux"
)
// RegisterPprofRoutes will register the Pprof Debugging endpoints to the provided router
func RegisterPprofRoutes(routerName string, h *http.ServeMux, logger interface{}) {
tl.Info(logger,
"registering pprof /debug routes", tl.Pairs{"routerName": routerName})
h.HandleFunc("/debug/pprof/", pprof.Index)
h.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
h.HandleFunc("/debug/pprof/profile", pprof.Profile)
h.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
h.HandleFunc("/debug/pprof/trace", pprof.Trace)
}
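// Usage sketch (hypothetical host/port): once these routes are live on a listener,
// a CPU profile can be pulled with
//   go tool pprof http://localhost:8481/debug/pprof/profile
// where the actual port depends on which ServeMux the caller registered.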
// RegisterProxyRoutes iterates the Trickster Configuration and
// registers the routes for the configured backends
func RegisterProxyRoutes(conf *config.Config, router *mux.Router, metricsRouter *http.ServeMux,
caches map[string]cache.Cache, tracers tracing.Tracers,
logger interface{}, dryRun bool) (backends.Backends, error) {
// a fake "top-level" backend representing the main frontend, so rules can route
// to it via the clients map
tlo, _ := reverseproxycache.NewClient("frontend", &bo.Options{}, router, nil, nil, nil)
// proxyClients maintains a list of proxy clients configured for use by Trickster
var clients = backends.Backends{"frontend": tlo}
var err error
defaultBackend := ""
var ndo *bo.Options // points to the backend options named "default"
var cdo *bo.Options // points to the backend options with IsDefault set to true
// This iteration will ensure default backends are handled properly
for k, o := range conf.Backends {
if !providers.IsValidProvider(o.Provider) {
return nil,
fmt.Errorf(`unknown backend provider in backend options. backendName: %s, backendProvider: %s`,
k, o.Provider)
}
// Ensure only one default backend exists
if o.IsDefault {
if cdo != nil {
return nil,
fmt.Errorf("only one backend can be marked as default. Found both %s and %s",
defaultBackend, k)
}
tl.Debug(logger, "default backend identified", tl.Pairs{"name": k})
defaultBackend = k
cdo = o
continue
}
// handle backend named "default" last as it needs special
// handling based on a full pass over the range
if k == "default" {
ndo = o
continue
}
err = registerBackendRoutes(router, metricsRouter, conf,
k, o, clients, caches, tracers, logger, dryRun)
if err != nil {
return nil, err
}
}
if ndo != nil {
if cdo == nil {
ndo.IsDefault = true
cdo = ndo
defaultBackend = "default"
} else {
err = registerBackendRoutes(router, nil, conf, "default", ndo, clients, caches, tracers, logger, dryRun)
if err != nil {
return nil, err
}
}
}
if cdo != nil {
err = registerBackendRoutes(router, metricsRouter, conf,
defaultBackend, cdo, clients, caches, tracers, logger, dryRun)
if err != nil {
return nil, err
}
}
err = rule.ValidateOptions(clients, conf.CompiledRewriters)
if err != nil {
return nil, err
}
err = alb.ValidatePools(clients)
if err != nil {
return nil, err
}
return clients, nil
}
var noCacheBackends = map[string]interface{}{
"alb": nil,
"rp": nil,
"reverseproxy": nil,
"proxy": nil,
"rule": nil,
}
// RegisterHealthHandler registers the main health handler
func RegisterHealthHandler(router *http.ServeMux, path string, hc healthcheck.HealthChecker) {
router.Handle(path, health.StatusHandler(hc))
}
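// Sketch (assumed call site): RegisterHealthHandler(metricsRouter, conf.Main.HealthHandlerPath, hc)
// exposes the aggregate status page produced by health.StatusHandler on the metrics listener.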
func registerBackendRoutes(router *mux.Router, metricsRouter *http.ServeMux, conf *config.Config, k string,
o *bo.Options, clients backends.Backends, caches map[string]cache.Cache,
tracers tracing.Tracers, logger interface{}, dryRun bool) error {
var client backends.Backend
var c cache.Cache
var ok bool
var err error
if _, ok = noCacheBackends[o.Provider]; !ok {
if c, ok = caches[o.CacheName]; !ok {
return fmt.Errorf("could not find cache named [%s]", o.CacheName)
}
}
if !dryRun {
tl.Info(logger, "registering route paths", tl.Pairs{"backendName": k,
"backendProvider": o.Provider, "upstreamHost": o.Host})
}
cf := registration.SupportedProviders()
if f, ok := cf[strings.ToLower(o.Provider)]; ok && f != nil {
client, err = f(k, o, mux.NewRouter(), c, clients, cf)
}
if err != nil {
return err
}
if client != nil && !dryRun {
o.HTTPClient = client.HTTPClient()
clients[k] = client
defaultPaths := client.DefaultPathConfigs(o)
h := client.Handlers()
RegisterPathRoutes(router, h, client, o, c, defaultPaths,
tracers, conf.Main.HealthHandlerPath, logger)
// now we'll go ahead and register the health handler
if h, ok := client.Handlers()["health"]; ok && o.Name != "" && metricsRouter != nil && (o.HealthCheck == nil ||
o.HealthCheck.Verb != "x") {
hp := strings.Replace(conf.Main.HealthHandlerPath+"/"+o.Name, "//", "/", -1)
tl.Debug(logger, "registering health handler path",
tl.Pairs{"path": hp, "backendName": o.Name,
"upstreamPath": o.HealthCheck.Path,
"upstreamVerb": o.HealthCheck.Verb})
metricsRouter.Handle(hp, http.Handler(middleware.WithResourcesContext(client, o, nil, nil, nil, logger, h)))
}
}
return nil
}
// RegisterPathRoutes will take the provided default paths map,
// merge it with any path data in the provided backend options, and then register
// the path routes to the appropriate handler from the provided handlers map
func RegisterPathRoutes(router *mux.Router, handlers map[string]http.Handler,
client backends.Backend, o *bo.Options, c cache.Cache,
defaultPaths map[string]*po.Options, tracers tracing.Tracers,
healthHandlerPath string, logger interface{}) {
if o == nil {
return
}
// get the distributed tracer if configured
var tr *tracing.Tracer
if o != nil {
if t, ok := tracers[o.TracingConfigName]; ok {
tr = t
}
}
decorate := func(po1 *po.Options) http.Handler {
// default base route is the path handler
h := po1.Handler
// attach distributed tracer
if tr != nil {
h = middleware.Trace(tr, h)
}
// attach compression handler
h = encoding.HandleCompression(h, o.CompressibleTypes)
// add Backend, Cache, and Path Configs to the HTTP Request's context
h = middleware.WithResourcesContext(client, o, c, po1, tr, logger, h)
// attach any request rewriters
if len(o.ReqRewriter) > 0 {
h = rewriter.Rewrite(o.ReqRewriter, h)
}
if len(po1.ReqRewriter) > 0 {
h = rewriter.Rewrite(po1.ReqRewriter, h)
}
// decorate frontend prometheus metrics
if !po1.NoMetrics {
h = middleware.Decorate(o.Name, o.Provider, po1.Path, h)
}
return h
}
// This takes the default paths, named like '/api/v1/query', and morphs each name
// into what the router wants, like '/api/v1/query-0000011001', to help with route
// sorting. The method bitmap provides unique names for multiple path entries of the
// same path but with different methods, without impacting true path sorting.
pathsWithVerbs := make(map[string]*po.Options)
for _, p := range defaultPaths {
if len(p.Methods) == 0 {
p.Methods = methods.CacheableHTTPMethods()
}
pathsWithVerbs[p.Path+"-"+fmt.Sprintf("%010b", methods.MethodMask(p.Methods...))] = p
}
// now we will iterate through the configured paths, and overlay them on those default paths.
// for rule & alb backend providers, only the default paths are used with no overlay or importable config
if !backends.IsVirtual(o.Provider) {
for k, p := range o.Paths {
if p2, ok := pathsWithVerbs[k]; ok {
p2.Merge(p)
continue
}
p3 := po.New()
p3.Merge(p)
pathsWithVerbs[k] = p3
}
}
plist := make([]string, 0, len(pathsWithVerbs))
deletes := make([]string, 0, len(pathsWithVerbs))
for k, p := range pathsWithVerbs {
if h, ok := handlers[p.HandlerName]; ok && h != nil {
p.Handler = h
plist = append(plist, k)
} else {
tl.Info(logger, "invalid handler name for path",
tl.Pairs{"path": p.Path, "handlerName": p.HandlerName})
deletes = append(deletes, p.Path)
}
}
for _, p := range deletes {
delete(pathsWithVerbs, p)
}
// sort the path keys by length, then reverse the slice so the longest
// (most specific) paths are registered first
sort.Sort(ByLen(plist))
for i := len(plist)/2 - 1; i >= 0; i-- {
opp := len(plist) - 1 - i
plist[i], plist[opp] = plist[opp], plist[i]
}
or := client.Router().(*mux.Router)
for _, v := range plist {
p := pathsWithVerbs[v]
pathPrefix := "/" + o.Name
handledPath := pathPrefix + p.Path
tl.Debug(logger, "registering backend handler path",
tl.Pairs{"backendName": o.Name, "path": v, "handlerName": p.HandlerName,
"backendHost": o.Host, "handledPath": handledPath, "matchType": p.MatchType,
"frontendHosts": strings.Join(o.Hosts, ",")})
if p.Handler != nil && len(p.Methods) > 0 {
if p.Methods[0] == "*" {
p.Methods = methods.AllHTTPMethods()
}
switch p.MatchType {
case matching.PathMatchTypePrefix:
// Case where we path match by prefix
// Host Header Routing
for _, h := range o.Hosts {
router.PathPrefix(p.Path).Handler(decorate(p)).Methods(p.Methods...).Host(h)
}
if !o.PathRoutingDisabled {
// Path Routing
router.PathPrefix(handledPath).Handler(middleware.StripPathPrefix(pathPrefix,
decorate(p))).Methods(p.Methods...)
}
or.PathPrefix(p.Path).Handler(decorate(p)).Methods(p.Methods...)
default:
// default to exact match
// Host Header Routing
for _, h := range o.Hosts {
router.Handle(p.Path, decorate(p)).Methods(p.Methods...).Host(h)
}
if !o.PathRoutingDisabled {
// Path Routing
router.Handle(handledPath, middleware.StripPathPrefix(pathPrefix,
decorate(p))).Methods(p.Methods...)
}
or.Handle(p.Path, decorate(p)).Methods(p.Methods...)
}
}
}
o.Router = or
o.Paths = pathsWithVerbs
}
// RegisterDefaultBackendRoutes will iterate the Backends and register the default routes
func | (router *mux.Router, bknds backends.Backends,
logger interface{}, tracers tracing.Tracers) {
decorate := func(o *bo.Options, po *po.Options, tr *tracing.Tracer,
c cache.Cache, client backends.Backend) http.Handler {
// default base route is the path handler
h := po.Handler
// attach distributed tracer
if tr != nil {
h = middleware.Trace(tr, h)
}
// add Backend, Cache, and Path Configs to the HTTP Request's context
h = middleware.WithResourcesContext(client, o, c, po, tr, logger, h)
// attach any request rewriters
if len(o.ReqRewriter) > 0 {
h = rewriter.Rewrite(o.ReqRewriter, h)
}
if len(po.ReqRewriter) > 0 {
h = rewriter.Rewrite(po.ReqRewriter, h)
}
// decorate frontend prometheus metrics
if !po.NoMetrics {
h = middleware.Decorate(o.Name, o.Provider, po.Path, h)
}
return h
}
for _, b := range bknds {
o := b.Configuration()
if o.IsDefault {
var tr *tracing.Tracer
if t, ok := tracers[o.TracingConfigName]; ok {
tr = t
}
tl.Info(logger,
"registering default backend handler paths", tl.Pairs{"backendName": o.Name})
for _, p := range o.Paths {
if p.Handler != nil && len(p.Methods) > 0 {
tl.Debug(logger, "registering default backend handler paths",
tl.Pairs{"backendName": o.Name, "path": p.Path, "handlerName": p.HandlerName,
"matchType": p.MatchType})
switch p.MatchType {
case matching.PathMatchTypePrefix:
// Case where we path match by prefix
router.PathPrefix(p.Path).Handler(decorate(o, p, tr, b.Cache(), b)).Methods(p.Methods...)
default:
// default to exact match
router.Handle(p.Path, decorate(o, p, tr, b.Cache(), b)).Methods(p.Methods...)
}
}
}
}
}
}
// ByLen allows sorting of a string slice by string length
type ByLen []string
func (a ByLen) Len() int {
return len(a)
}
func (a ByLen) Less(i, j int) bool {
return len(a[i]) < len(a[j])
}
func (a ByLen) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
| RegisterDefaultBackendRoutes |
theme.go | package data
import (
"context"
app "mall-go/api/app/service"
"mall-go/app/app/service/internal/biz"
"mall-go/app/app/service/internal/data/model/theme"
"github.com/go-kratos/kratos/v2/log"
)
type themeRepo struct {
data *Data
log *log.Helper
}
func NewThemeRepo(data *Data, logger log.Logger) biz.ThemeRepo {
return &themeRepo{
data: data,
log: log.NewHelper(logger),
}
}
func (r *themeRepo) GetThemeByName(ctx context.Context, name string) (t biz.ThemeSpu, err error) {
po, err := r.data.db.Theme.Query().Where(theme.Name(name)).WithThemeSpu().First(ctx)
if err != nil {
return
}
theme := biz.Theme{
Id: po.ID,
Name: po.Name,
Title: po.Title,
Description: po.Description,
EntranceImg: po.EntranceImg,
InternalTopImg: po.InternalTopImg,
TitleImg: po.TitleImg,
TplName: po.TplName,
Online: int32(po.Online),
}
var spuIds []int64
for _, spu := range po.Edges.ThemeSpu {
spuIds = append(spuIds, spu.SpuID)
}
return biz.ThemeSpu{
Theme: theme, SpuIds: spuIds,
}, nil
}
func (r *themeRepo) GetThemeByNames(ctx context.Context, names []string) (themes []biz.Theme, err error) {
pos, err := r.data.db.Theme.Query().Where(theme.NameIn(names...)).All(ctx)
if err != nil |
for _, po := range pos {
themes = append(themes, biz.Theme{
Id: po.ID,
Name: po.Name,
Title: po.Title,
Description: po.Description,
EntranceImg: po.EntranceImg,
InternalTopImg: po.InternalTopImg,
TitleImg: po.TitleImg,
TplName: po.TplName,
Online: int32(po.Online),
})
}
return themes, nil
}
func (r *themeRepo) CreateTheme(ctx context.Context, req app.Theme) (err error) {
return
}
func (r *themeRepo) ListTheme(ctx context.Context) (t []biz.Theme, err error) {
return
}
func (r *themeRepo) UpdateTheme(ctx context.Context, req app.Theme) (err error) {
return
}
| {
return nil, err
} |
simpleSupportedVSCurrencies.go | package main
import (
"fmt"
"log"
gecko "github.com/superoo7/go-gecko/v3"
)
func main() {
cg := gecko.NewClient(nil) | currencies, err := cg.SimpleSupportedVSCurrencies()
if err != nil {
log.Fatal(err)
}
fmt.Println("Total currencies", len(*currencies))
fmt.Println(*currencies)
} | |
mod.rs | #[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::IFC {
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
}
#[doc = r" Proxy"]
pub struct _HFRCORDYW<'a> {
w: &'a mut W,
}
impl<'a> _HFRCORDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn | (self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _HFXORDYW<'a> {
w: &'a mut W,
}
impl<'a> _HFXORDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _LFRCORDYW<'a> {
w: &'a mut W,
}
impl<'a> _LFRCORDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _LFXORDYW<'a> {
w: &'a mut W,
}
impl<'a> _LFXORDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _AUXHFRCORDYW<'a> {
w: &'a mut W,
}
impl<'a> _AUXHFRCORDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CALRDYW<'a> {
w: &'a mut W,
}
impl<'a> _CALRDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _CALOFW<'a> {
w: &'a mut W,
}
impl<'a> _CALOFW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _USHFRCORDYW<'a> {
w: &'a mut W,
}
impl<'a> _USHFRCORDYW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = r" Proxy"]
pub struct _USBCHFOSCSELW<'a> {
w: &'a mut W,
}
impl<'a> _USBCHFOSCSELW<'a> {
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - HFRCO Ready Interrupt Flag Clear"]
#[inline]
pub fn hfrcordy(&mut self) -> _HFRCORDYW {
_HFRCORDYW { w: self }
}
#[doc = "Bit 1 - HFXO Ready Interrupt Flag Clear"]
#[inline]
pub fn hfxordy(&mut self) -> _HFXORDYW {
_HFXORDYW { w: self }
}
#[doc = "Bit 2 - LFRCO Ready Interrupt Flag Clear"]
#[inline]
pub fn lfrcordy(&mut self) -> _LFRCORDYW {
_LFRCORDYW { w: self }
}
#[doc = "Bit 3 - LFXO Ready Interrupt Flag Clear"]
#[inline]
pub fn lfxordy(&mut self) -> _LFXORDYW {
_LFXORDYW { w: self }
}
#[doc = "Bit 4 - AUXHFRCO Ready Interrupt Flag Clear"]
#[inline]
pub fn auxhfrcordy(&mut self) -> _AUXHFRCORDYW {
_AUXHFRCORDYW { w: self }
}
#[doc = "Bit 5 - Calibration Ready Interrupt Flag Clear"]
#[inline]
pub fn calrdy(&mut self) -> _CALRDYW {
_CALRDYW { w: self }
}
#[doc = "Bit 6 - Calibration Overflow Interrupt Flag Clear"]
#[inline]
pub fn calof(&mut self) -> _CALOFW {
_CALOFW { w: self }
}
#[doc = "Bit 8 - USHFRCO Ready Interrupt Flag Clear"]
#[inline]
pub fn ushfrcordy(&mut self) -> _USHFRCORDYW {
_USHFRCORDYW { w: self }
}
#[doc = "Bit 9 - USBC HF-oscillator Selected Interrupt Flag Clear"]
#[inline]
pub fn usbchfoscsel(&mut self) -> _USBCHFOSCSELW {
_USBCHFOSCSELW { w: self }
}
}
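// Usage sketch (assuming a peripheral handle `cmu` that owns this IFC register):
//   cmu.ifc.write(|w| w.hfrcordy().set_bit().calrdy().set_bit());
// `write` starts from the reset value (0), so only the named flag-clear bits are set.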
| clear_bit |
_Cisco_IOS_XR_aaa_protocol_radius_oper.py | import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Radius.Nodes.Node.Client' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Client',
False,
[
_MetaInfoClassMember('authentication-nas-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' NAS-Identifier of the RADIUS authentication
client
''',
'authentication_nas_id',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-accounting-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of RADIUS accounting responses packets
received from unknown addresses
''',
'unknown_accounting_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-authentication-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of RADIUS access responses packets
received from unknown addresses
''',
'unknown_authentication_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'client',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time',
False,
[
_MetaInfoClassMember('is-computed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if computed; false if not
''',
'is_computed',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Value for time or tries
''',
'value',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'time',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries',
False,
[
_MetaInfoClassMember('is-computed', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if computed; false if not
''',
'is_computed',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('value', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Value for time or tries
''',
'value',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'tries',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts.Host' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts.Host',
False,
[
_MetaInfoClassMember('acct-port-number', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Accounting Port number (standard port 1646)
''',
'acct_port_number',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('auth-port-number', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Authentication Port number (standard port
1645)
''',
'auth_port_number',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP address of RADIUS server
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False, [
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
]),
_MetaInfoClassMember('time', REFERENCE_CLASS, 'Time' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time',
[], [],
''' Time in seconds
''',
'time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('tries', REFERENCE_CLASS, 'Tries' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries',
[], [],
''' Number of tries
''',
'tries',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria.Hosts' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts.Host',
[], [],
''' RADIUS Server
''',
'host',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DeadCriteria' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DeadCriteria',
False,
[
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria.Hosts',
[], [],
''' RADIUS server dead criteria host table
''',
'hosts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'dead-criteria',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_',
False,
[
_MetaInfoClassMember('access-accepts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access accepts
''',
'access_accepts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-challenges', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access challenges
''',
'access_challenges',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-rejects', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access rejects
''',
'access_rejects',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-request-retransmits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of retransmitted access requests
''',
'access_request_retransmits',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access requests
''',
'access_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-timeouts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access packets timed out
''',
'access_timeouts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect authentication responses
''',
'authen_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for authentication
requests
''',
'authen_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error authentication responses
''',
'authen_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed authentication transactions
''',
'authen_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded authentication transactions
''',
'authen_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected authentication responses
''',
'authen_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-access-authenticators', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad access authenticators
''',
'bad_access_authenticators',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-access-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad access responses
''',
'bad_access_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dropped-access-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access responses dropped
''',
'dropped_access_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('pending-access-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of pending access requests
''',
'pending_access_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('rtt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Round trip time for authentication in
milliseconds
''',
'rtt',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-access-types', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets received with unknown type
from authentication server
''',
'unknown_access_types',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Authentication.AuthenticationGroup' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Authentication.AuthenticationGroup',
False,
[
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_',
[], [],
''' Authentication data
''',
'authentication',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('family', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address Family
''',
'family',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address buffer
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Authentication port number
''',
'port',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('server-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'server_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication-group',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Authentication' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Authentication',
False,
[
_MetaInfoClassMember('authentication-group', REFERENCE_LIST, 'AuthenticationGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Authentication.AuthenticationGroup',
[], [],
''' List of authentication groups
''',
'authentication_group',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_',
False,
[
_MetaInfoClassMember('acct-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect accounting responses
''',
'acct_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for accounting
requests
''',
'acct_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error accounting responses
''',
'acct_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed accounting transactions
''',
'acct_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded accounting transactions
''',
'acct_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected accounting responses
''',
'acct_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-authenticators', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad accounting authenticators
''',
'bad_authenticators',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False), | ''',
'bad_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dropped-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting responses dropped
''',
'dropped_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('pending-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of pending accounting requests
''',
'pending_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting requests
''',
'requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting responses
''',
'responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('retransmits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of retransmitted accounting requests
''',
'retransmits',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('rtt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Round trip time for accounting in milliseconds
''',
'rtt',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('timeouts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting packets timed-out
''',
'timeouts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-packet-types', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets received with unknown type
from accounting server
''',
'unknown_packet_types',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'accounting',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Accounting.AccountingGroup' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Accounting.AccountingGroup',
False,
[
_MetaInfoClassMember('accounting', REFERENCE_CLASS, 'Accounting_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_',
[], [],
''' Accounting data
''',
'accounting',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('family', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address Family
''',
'family',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address buffer
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Accounting port number
''',
'port',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('server-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of RADIUS server
''',
'server_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'accounting-group',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.Accounting' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.Accounting',
False,
[
_MetaInfoClassMember('accounting-group', REFERENCE_LIST, 'AccountingGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Accounting.AccountingGroup',
[], [],
''' List of accounting groups
''',
'accounting_group',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'accounting',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting',
False,
[
_MetaInfoClassMember('acct-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect accounting responses
''',
'acct_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for accounting
requests
''',
'acct_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error accounting responses
''',
'acct_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed accounting transactions
''',
'acct_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded accounting transactions
''',
'acct_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('acct-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected accounting responses
''',
'acct_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-authenticators', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad accounting authenticators
''',
'bad_authenticators',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad accounting responses
''',
'bad_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dropped-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting responses dropped
''',
'dropped_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('pending-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of pending accounting requests
''',
'pending_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting requests
''',
'requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting responses
''',
'responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('retransmits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of retransmitted accounting requests
''',
'retransmits',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('rtt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Round trip time for accounting in milliseconds
''',
'rtt',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('timeouts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of accounting packets timed-out
''',
'timeouts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-packet-types', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets received with unknown type
from accounting server
''',
'unknown_packet_types',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'accounting',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication',
False,
[
_MetaInfoClassMember('access-accepts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access accepts
''',
'access_accepts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-challenges', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access challenges
''',
'access_challenges',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-rejects', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access rejects
''',
'access_rejects',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-request-retransmits', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of retransmitted access requests
''',
'access_request_retransmits',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access requests
''',
'access_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('access-timeouts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access packets timed out
''',
'access_timeouts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect authentication responses
''',
'authen_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for authentication
requests
''',
'authen_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error authentication responses
''',
'authen_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed authentication transactions
''',
'authen_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded authentication transactions
''',
'authen_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authen-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected authentication responses
''',
'authen_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-access-authenticators', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad access authenticators
''',
'bad_access_authenticators',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('bad-access-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad access responses
''',
'bad_access_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dropped-access-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of access responses dropped
''',
'dropped_access_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('pending-access-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of pending access requests
''',
'pending_access_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('rtt', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Round trip time for authentication in
milliseconds
''',
'rtt',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('unknown-access-types', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets received with unknown type
from authentication server
''',
'unknown_access_types',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authentication',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization',
False,
[
_MetaInfoClassMember('author-incorrect-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of incorrect authorization responses
''',
'author_incorrect_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-request-timeouts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of authorization requests timed out
''',
'author_request_timeouts',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of authorization requests
''',
'author_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-response-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Average response time for authorization requests
''',
'author_response_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-server-error-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of server error authorization responses
''',
'author_server_error_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-transaction-failure', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of failed authorization transactions
''',
'author_transaction_failure',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-transaction-successess', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of succeeded authorization transactions
''',
'author_transaction_successess',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('author-unexpected-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of unexpected authorization responses
''',
'author_unexpected_responses',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'authorization',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_',
False,
[
_MetaInfoClassMember('accounting', REFERENCE_CLASS, 'Accounting' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting',
[], [],
''' Accounting data
''',
'accounting',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('accounting-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Accounting port
''',
'accounting_port',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication',
[], [],
''' Authentication data
''',
'authentication',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authentication-port', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Authentication port
''',
'authentication_port',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authorization', REFERENCE_CLASS, 'Authorization' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization',
[], [],
''' Authorization data
''',
'authorization',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('family', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address Family
''',
'family',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP address buffer
''',
'ip_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('is-private', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if private
''',
'is_private',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('server-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Server address
''',
'server_address',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'server-group',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.ServerGroups.ServerGroup' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.ServerGroups.ServerGroup',
False,
[
_MetaInfoClassMember('server-group-name', ATTRIBUTE, 'str' , None, None,
[], ['[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Group name
''',
'server_group_name',
'Cisco-IOS-XR-aaa-protocol-radius-oper', True),
_MetaInfoClassMember('dead-time', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Dead time in minutes
''',
'dead_time',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('groups', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of groups
''',
'groups',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('server-group', REFERENCE_LIST, 'ServerGroup_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_',
[], [],
''' Server groups
''',
'server_group',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('servers', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of servers
''',
'servers',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'server-group',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.ServerGroups' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.ServerGroups',
False,
[
_MetaInfoClassMember('server-group', REFERENCE_LIST, 'ServerGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.ServerGroups.ServerGroup',
[], [],
''' RADIUS server group data
''',
'server_group',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'server-groups',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node.DynamicAuthorization' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node.DynamicAuthorization',
False,
[
_MetaInfoClassMember('disconnected-invalid-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Invalid disconnected requests
''',
'disconnected_invalid_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('invalid-coa-requests', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Invalid change of authorization requests
''',
'invalid_coa_requests',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'dynamic-authorization',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes.Node' : {
'meta_info' : _MetaInfoClass('Radius.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node name
''',
'node_name',
'Cisco-IOS-XR-aaa-protocol-radius-oper', True),
_MetaInfoClassMember('accounting', REFERENCE_CLASS, 'Accounting' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Accounting',
[], [],
''' RADIUS accounting data
''',
'accounting',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('authentication', REFERENCE_CLASS, 'Authentication' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Authentication',
[], [],
''' RADIUS authentication data
''',
'authentication',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('client', REFERENCE_CLASS, 'Client' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.Client',
[], [],
''' RADIUS client data
''',
'client',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dead-criteria', REFERENCE_CLASS, 'DeadCriteria' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DeadCriteria',
[], [],
''' RADIUS dead criteria information
''',
'dead_criteria',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('dynamic-authorization', REFERENCE_CLASS, 'DynamicAuthorization' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.DynamicAuthorization',
[], [],
''' Dynamic authorization data
''',
'dynamic_authorization',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
_MetaInfoClassMember('server-groups', REFERENCE_CLASS, 'ServerGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node.ServerGroups',
[], [],
''' RADIUS server group table
''',
'server_groups',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius.Nodes' : {
'meta_info' : _MetaInfoClass('Radius.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes.Node',
[], [],
''' RADIUS operational data for a particular node
''',
'node',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
'Radius' : {
'meta_info' : _MetaInfoClass('Radius',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper', 'Radius.Nodes',
[], [],
''' Contains all the nodes
''',
'nodes',
'Cisco-IOS-XR-aaa-protocol-radius-oper', False),
],
'Cisco-IOS-XR-aaa-protocol-radius-oper',
'radius',
_yang_ns._namespaces['Cisco-IOS-XR-aaa-protocol-radius-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_aaa_protocol_radius_oper'
),
},
}
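# Link each nested class's meta_info to its parent container below, so the
# generated model hierarchy can be walked upward from any node.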
_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host.Time']['meta_info'].parent =_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host']['meta_info']
_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host.Tries']['meta_info'].parent =_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host']['meta_info']
_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts.Host']['meta_info'].parent =_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts']['meta_info']
_meta_table['Radius.Nodes.Node.DeadCriteria.Hosts']['meta_info'].parent =_meta_table['Radius.Nodes.Node.DeadCriteria']['meta_info']
_meta_table['Radius.Nodes.Node.Authentication.AuthenticationGroup.Authentication_']['meta_info'].parent =_meta_table['Radius.Nodes.Node.Authentication.AuthenticationGroup']['meta_info']
_meta_table['Radius.Nodes.Node.Authentication.AuthenticationGroup']['meta_info'].parent =_meta_table['Radius.Nodes.Node.Authentication']['meta_info']
_meta_table['Radius.Nodes.Node.Accounting.AccountingGroup.Accounting_']['meta_info'].parent =_meta_table['Radius.Nodes.Node.Accounting.AccountingGroup']['meta_info']
_meta_table['Radius.Nodes.Node.Accounting.AccountingGroup']['meta_info'].parent =_meta_table['Radius.Nodes.Node.Accounting']['meta_info']
_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Accounting']['meta_info'].parent =_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_']['meta_info']
_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authentication']['meta_info'].parent =_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_']['meta_info']
_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_.Authorization']['meta_info'].parent =_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_']['meta_info']
_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup.ServerGroup_']['meta_info'].parent =_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup']['meta_info']
_meta_table['Radius.Nodes.Node.ServerGroups.ServerGroup']['meta_info'].parent =_meta_table['Radius.Nodes.Node.ServerGroups']['meta_info']
_meta_table['Radius.Nodes.Node.Client']['meta_info'].parent =_meta_table['Radius.Nodes.Node']['meta_info']
_meta_table['Radius.Nodes.Node.DeadCriteria']['meta_info'].parent =_meta_table['Radius.Nodes.Node']['meta_info']
_meta_table['Radius.Nodes.Node.Authentication']['meta_info'].parent =_meta_table['Radius.Nodes.Node']['meta_info']
_meta_table['Radius.Nodes.Node.Accounting']['meta_info'].parent =_meta_table['Radius.Nodes.Node']['meta_info']
_meta_table['Radius.Nodes.Node.ServerGroups']['meta_info'].parent =_meta_table['Radius.Nodes.Node']['meta_info']
_meta_table['Radius.Nodes.Node.DynamicAuthorization']['meta_info'].parent =_meta_table['Radius.Nodes.Node']['meta_info']
_meta_table['Radius.Nodes.Node']['meta_info'].parent =_meta_table['Radius.Nodes']['meta_info']
_meta_table['Radius.Nodes']['meta_info'].parent =_meta_table['Radius']['meta_info'] | _MetaInfoClassMember('bad-responses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of bad accounting responses |
create-user_test.go | package test_presenters
import (
"testing"
"time"
"github.com/AndreyArthur/oganessone/src/application/definitions"
mock_definitions "github.com/AndreyArthur/oganessone/src/application/definitions/mocks"
"github.com/AndreyArthur/oganessone/src/core/dtos"
"github.com/AndreyArthur/oganessone/src/core/entities"
"github.com/AndreyArthur/oganessone/src/core/shared"
"github.com/AndreyArthur/oganessone/src/infrastructure/helpers"
"github.com/AndreyArthur/oganessone/src/presentation/contracts"
"github.com/AndreyArthur/oganessone/src/presentation/presenters"
"github.com/AndreyArthur/oganessone/tests/helpers/verifier"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
type CreateUserPresenterTest struct{}
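// setup wires a CreateUserPresenter to a gomock CreateUser use case for each test.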
func (*CreateUserPresenterTest) setup(t *testing.T) (*presenters.CreateUserPresenter, *mock_definitions.MockCreateUser, *gomock.Controller) {
ctrl := gomock.NewController(t)
useCase := mock_definitions.NewMockCreateUser(ctrl)
presenter, _ := presenters.NewCreateUserPresenter(useCase)
return presenter, useCase, ctrl
}
func TestCreateUserPresenter_SuccessCase(t *testing.T) |
func TestCreateUserPresenter_FailureCase(t *testing.T) {
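// arrange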
presenter, useCase, ctrl := (&CreateUserPresenterTest{}).setup(t)
defer ctrl.Finish()
username, email, password :=
"username",
"[email protected]",
"$2a$10$KtwHGGRiKWRDEq/g/2RAguaqIqU7iJNM11aFeqcwzDhuv9jDY35uW"
useCase.EXPECT().
Execute(&definitions.CreateUserDTO{
Username: username,
Email: email,
Password: password,
}).
Return(nil, &shared.Error{})
// act
result, err := presenter.Handle(&contracts.CreateUserPresenterRequest{
Body: &contracts.CreateUserPresenterRequestBody{
Username: username,
Email: email,
Password: password,
},
})
// assert
assert.Nil(t, result)
assert.Equal(t, err, &shared.Error{})
}
| {
// arrange
presenter, useCase, ctrl := (&CreateUserPresenterTest{}).setup(t)
defer ctrl.Finish()
uuid, _ := helpers.NewUuid()
now := time.Now().UTC()
id, username, email, password, createdAt, updatedAt :=
uuid.Generate(),
"username",
"[email protected]",
"$2a$10$KtwHGGRiKWRDEq/g/2RAguaqIqU7iJNM11aFeqcwzDhuv9jDY35uW",
now,
now
entity, _ := entities.NewUserEntity(&dtos.UserDTO{
Id: id,
Username: username,
Email: email,
Password: password,
CreatedAt: createdAt,
UpdatedAt: updatedAt,
})
useCase.EXPECT().
Execute(&definitions.CreateUserDTO{
Username: username,
Email: email,
Password: password,
}).
Return(entity, nil)
// act
result, err := presenter.Handle(&contracts.CreateUserPresenterRequest{
Body: &contracts.CreateUserPresenterRequestBody{
Username: username,
Email: email,
Password: password,
},
})
// assert
assert.Nil(t, err)
assert.True(t, verifier.IsUuid(result.Body.Id))
assert.True(t, verifier.IsUserUsername(result.Body.Username))
assert.True(t, verifier.IsEmail(result.Body.Email))
assert.True(t, verifier.IsISO8601(result.Body.CreatedAt))
assert.True(t, verifier.IsISO8601(result.Body.UpdatedAt))
} |
tests.rs | use crate::db::HirDatabase;
use crate::db::SourceDatabase;
use crate::mock::MockDatabase;
use std::sync::Arc;
/// This function tests that the ModuleData of a module does not change if the contents of a function
/// are changed.
#[test]
fn check_module_data_does_not_change() | {
let (mut db, file_id) = MockDatabase::with_single_file(
r#"
fn foo()->i32 {
1+1
}
"#,
);
{
let events = db.log_executed(|| {
db.module_data(file_id);
});
assert!(
format!("{:?}", events).contains("module_data"),
"{:#?}",
events
)
}
db.set_file_text(
file_id,
Arc::new(
r#"
fn foo()->i32 {
90
}
"#
.to_owned(),
),
);
{
let events = db.log_executed(|| {
db.module_data(file_id);
});
assert!(
!format!("{:?}", events).contains("module_data"),
"{:#?}",
events
)
}
} |
|
group_joined_event.py | # To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = group_joined_event_from_dict(json.loads(json_string))
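#
# and, to convert the result back to a JSON string, do
#
#     json.dumps(group_joined_event_to_dict(result))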
from dataclasses import dataclass
from typing import Any, Optional, TypeVar, Type, cast
T = TypeVar("T")
def from_str(x: Any) -> str:
assert isinstance(x, str)
return x
def from_none(x: Any) -> Any:
assert x is None
return x
def | (fs, x):
for f in fs:
try:
return f(x)
except:
pass
assert False
def to_class(c: Type[T], x: Any) -> dict:
assert isinstance(x, c)
return cast(Any, x).to_dict()
@dataclass
class Channel:
pass
@staticmethod
def from_dict(obj: Any) -> 'Channel':
assert isinstance(obj, dict)
return Channel()
def to_dict(self) -> dict:
result: dict = {}
return result
@dataclass
class GroupJoinedEvent:
type: Optional[str] = None
channel: Optional[Channel] = None
@staticmethod
def from_dict(obj: Any) -> 'GroupJoinedEvent':
assert isinstance(obj, dict)
type = from_union([from_str, from_none], obj.get("type"))
channel = from_union([Channel.from_dict, from_none], obj.get("channel"))
return GroupJoinedEvent(type, channel)
def to_dict(self) -> dict:
result: dict = {}
result["type"] = from_union([from_str, from_none], self.type)
result["channel"] = from_union([lambda x: to_class(Channel, x), from_none], self.channel)
return result
def group_joined_event_from_dict(s: Any) -> GroupJoinedEvent:
return GroupJoinedEvent.from_dict(s)
def group_joined_event_to_dict(x: GroupJoinedEvent) -> Any:
return to_class(GroupJoinedEvent, x)
| from_union |
expr.rs | //! This module has logic to translate gRPC structures into the native
//! storage system form by extending the builders for those structures with new
//! traits
//!
//! RPCPredicate --> query::Predicates
//!
//! Aggregates / windows --> query::GroupByAndAggregate
use std::{convert::TryFrom, fmt};
use arrow_deps::datafusion::{
logical_plan::{binary_expr, Expr, Operator},
prelude::*,
};
use generated_types::{
aggregate::AggregateType as RPCAggregateType, node::Comparison as RPCComparison,
node::Logical as RPCLogical, node::Value as RPCValue, read_group_request::Group as RPCGroup,
Aggregate as RPCAggregate, Duration as RPCDuration, Node as RPCNode, Predicate as RPCPredicate,
Window as RPCWindow,
};
use super::{TAG_KEY_FIELD, TAG_KEY_MEASUREMENT};
use query::group_by::{Aggregate as QueryAggregate, GroupByAndAggregate, WindowDuration};
use query::predicate::PredicateBuilder;
use snafu::{ResultExt, Snafu};
use tracing::warn;
#[derive(Debug, Snafu)]
pub enum Error {
#[snafu(display("Error creating aggregate: Unexpected empty aggregate"))]
EmptyAggregate {},
#[snafu(display("Error creating aggregate: Exactly one aggregate is supported, but {} were supplied: {:?}",
aggregates.len(), aggregates))]
AggregateNotSingleton { aggregates: Vec<RPCAggregate> },
#[snafu(display("Error creating aggregate: Unknown aggregate type {}", aggregate_type))]
UnknownAggregate { aggregate_type: i32 },
#[snafu(display("Error creating aggregate: Unknown group type: {}", group_type))]
UnknownGroup { group_type: i32 },
#[snafu(display(
"Incompatible read_group request: Group::None had {} group keys (expected 0)",
num_group_keys
))]
InvalidGroupNone { num_group_keys: usize },
#[snafu(display(
"Incompatible read_group request: Group::By had no group keys (expected at least 1)"
))]
InvalidGroupBy {},
#[snafu(display("Error creating predicate: Unexpected empty predicate: Node"))]
EmptyPredicateNode {},
#[snafu(display("Error creating predicate: Unexpected empty predicate value"))]
EmptyPredicateValue {},
#[snafu(display("Error parsing window bounds: No window specified"))]
EmptyWindow {},
#[snafu(display("Error parsing window bounds duration 'window.every': {}", description))]
InvalidWindowEveryDuration { description: String },
#[snafu(display(
"Error parsing window bounds duration 'window.offset': {}",
description
))]
InvalidWindowOffsetDuration { description: String },
#[snafu(display("Internal error: found measurement tag reference in unexpected location"))]
InternalInvalidMeasurementReference {},
#[snafu(display("Internal error: found field tag reference in unexpected location"))]
InternalInvalidFieldReference {},
#[snafu(display(
"Error creating predicate: Regular expression predicates are not supported: {}",
regexp
))]
RegExpLiteralNotSupported { regexp: String },
#[snafu(display("Error creating predicate: Regular expression predicates are not supported"))]
RegExpNotSupported {},
#[snafu(display(
"Error creating predicate: Not Regular expression predicates are not supported"
))]
NotRegExpNotSupported {},
#[snafu(display("Error creating predicate: StartsWith comparisons not supported"))]
StartsWithNotSupported {},
#[snafu(display(
"Error creating predicate: Unexpected children for predicate: {:?}",
value
))]
UnexpectedChildren { value: RPCValue },
#[snafu(display("Error creating predicate: Unknown logical node type: {}", logical))]
UnknownLogicalNode { logical: i32 },
#[snafu(display(
"Error creating predicate: Unknown comparison node type: {}",
comparison
))]
UnknownComparisonNode { comparison: i32 },
#[snafu(display(
"Error creating predicate: Unsupported number of children in binary operator {:?}: {} (must be 2)",
op,
num_children
))]
UnsupportedNumberOfChildren { op: Operator, num_children: usize },
#[snafu(display("Error converting tag_name to utf8: {}", source))]
ConvertingTagName { source: std::string::FromUtf8Error },
#[snafu(display("Error converting field_name to utf8: {}", source))]
ConvertingFieldName { source: std::string::FromUtf8Error },
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
/// A trait for adding gRPC specific nodes to the generic predicate builder
pub trait AddRPCNode
where
Self: Sized,
{
fn rpc_predicate(self, predicate: Option<RPCPredicate>) -> Result<Self>;
}
impl AddRPCNode for PredicateBuilder {
/// Adds the predicates represented by the Node (predicate tree)
/// into predicates that can be evaluated by the storage system
///
/// RPC predicates can have several different types of 'predicate' embedded
/// in them.
///
/// Predicates on tag value (where a tag is a column)
///
/// Predicates on field value (where field is also a column)
///
/// Predicates on 'measurement name' (encoded as tag_ref=\x00), aka select
/// from a particular table
///
/// Predicates on 'field name' (encoded as tag_ref=\xff), aka select only
/// specific fields
///
/// This code pulls apart the predicates, if any, into a StoragePredicate
/// that breaks the predicate apart
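///
/// For example (illustrative), a conjunction such as
/// `(_measurement = 'cpu') AND (region = 'west')` is split so that the
/// measurement equality becomes a table restriction on `cpu`, while
/// `region = 'west'` falls through to a generic DataFusion expression.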
fn rpc_predicate(self, rpc_predicate: Option<RPCPredicate>) -> Result<Self> {
match rpc_predicate {
// no input predicate, is fine
None => Ok(self),
Some(rpc_predicate) => {
match rpc_predicate.root {
None => EmptyPredicateNode {}.fail(),
Some(node) => {
// normalize so the rest of the passes can deal with fewer cases
let node = normalize_node(node)?;
// step one is to flatten any AND tree into a vector of conjuncts
let conjuncts = flatten_ands(node, Vec::new())?;
conjuncts.into_iter().try_fold(self, convert_simple_node)
}
}
}
}
}
}
/// cleans up / normalizes the input in preparation for other
/// processing. Normalizations performed:
///
/// 1. Flatten `None` value nodes with `children` of length 1 (semantically the
/// same as the child itself). Specifically, if the input is:
///
/// ```
/// Node {
/// value: None,
/// children: [child],
/// }
/// ```
///
/// Then the output is:
///
/// ```
/// child
/// ```
fn normalize_node(node: RPCNode) -> Result<RPCNode> {
let RPCNode {
node_type,
children,
value,
} = node;
let mut normalized_children = children
.into_iter()
.map(normalize_node)
.collect::<Result<Vec<_>>>()?;
match (value, normalized_children.len()) {
// Sometimes InfluxQL sends in an RPCNode with 1 child and no value,
// which seems to be some sort of wrapper -- unwrap this case
(None, 1) => Ok(normalized_children.pop().unwrap()),
// It is not clear what None means without exactly one child.
(None, _) => EmptyPredicateValue {}.fail(),
(Some(value), _) => {
// perform any other normalizations needed
Ok(RPCNode {
node_type,
children: normalized_children,
value: Some(value),
})
}
}
}
/// Converts the node and updates the `StoragePredicate` being built, as
/// appropriate
///
/// It recognizes special predicate patterns and pulls them into
/// the fields on `StoragePredicate` for special processing. If no
/// patterns are matched, it falls back to a generic DataFusion Expr
fn convert_simple_node(builder: PredicateBuilder, node: RPCNode) -> Result<PredicateBuilder> {
if let Ok(in_list) = InList::try_from(&node) {
let InList { lhs, value_list } = in_list;
// look for tag or measurement = <values>
if let Some(RPCValue::TagRefValue(tag_name)) = lhs.value {
if tag_name.is_measurement() {
// add the table names as a predicate
return Ok(builder.tables(value_list));
} else if tag_name.is_field() {
return Ok(builder.field_columns(value_list));
}
}
}
// If no special case applies, fall back to generic conversion
let expr = convert_node_to_expr(node)?;
Ok(builder.add_expr(expr))
}
/// converts a tree of (a AND (b AND c)) into [a, b, c]
fn flatten_ands(node: RPCNode, mut dst: Vec<RPCNode>) -> Result<Vec<RPCNode>> {
// try to break it up, if possible
if Some(RPCValue::Logical(RPCLogical::And as i32)) == node.value {
let RPCNode { children, .. } = node;
// try and add each child separately
for child in children {
dst = flatten_ands(child, dst)?;
}
} else {
dst.push(node);
}
Ok(dst)
}
// Represents a predicate like <expr> IN (option1, option2, option3, ....)
//
// use `TryFrom::try_from` to convert a tree like
// (((expr = option1) OR (expr = option2)) OR (expr = option3)) ... into such a form
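//
// For example (illustrative): ((host = 'a') OR (host = 'b')) OR (host = 'c')
// becomes InList { lhs: host, value_list: ["a", "b", "c"] }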
#[derive(Debug)]
struct InList {
lhs: RPCNode,
value_list: Vec<String>,
}
impl TryFrom<&RPCNode> for InList {
type Error = &'static str;
/// If node represents an OR tree like (expr = option1) OR (expr=option2)...
/// extracts an InList like expr IN (option1, option2)
fn try_from(node: &RPCNode) -> Result<Self, &'static str> {
InListBuilder::default().append(node)?.build()
}
}
impl InList {
fn new(lhs: RPCNode) -> Self {
Self {
lhs,
value_list: Vec::new(),
}
}
}
#[derive(Debug, Default)]
struct InListBuilder {
inner: Option<InList>,
}
impl InListBuilder {
/// given we are converting an expression like (self) OR (rhs)
///
/// attempts to flatten rhs into self
///
/// For example, if we are at self OR (foo = 'bar') and self.lhs
/// is foo, will add 'bar' to value_list
fn append(self, node: &RPCNode) -> Result<Self, &'static str> {
// lhs = rhs
if Some(RPCValue::Comparison(RPCComparison::Equal as i32)) == node.value {
assert_eq!(node.children.len(), 2);
let lhs = &node.children[0];
let rhs = &node.children[1];
self.append_equal(lhs, rhs)
}
// lhs OR rhs
else if Some(RPCValue::Logical(RPCLogical::Or as i32)) == node.value {
assert_eq!(node.children.len(), 2);
let lhs = &node.children[0];
let rhs = &node.children[1];
// recurse down both sides
self.append(lhs).and_then(|s| s.append(rhs))
} else {
Err("Found something other than equal or OR")
}
}
// append an lhs = rhs expression if possible; returns an error if not
fn append_equal(mut self, lhs: &RPCNode, rhs: &RPCNode) -> Result<Self, &'static str> {
let mut in_list = self
.inner
.take()
.unwrap_or_else(|| InList::new(lhs.clone()));
// lhs = rhs as String
if let Some(RPCValue::StringValue(string_value)) = &rhs.value {
if &in_list.lhs == lhs {
in_list.value_list.push(string_value.clone());
self.inner = Some(in_list);
Ok(self)
} else {
Err("lhs did not match")
}
} else {
Err("rhs wasn't a string")
}
}
// consume self and return the built InList
fn build(self) -> Result<InList, &'static str> {
self.inner.ok_or("No sub expressions found")
}
}
// encodes the magic special bytes that the storage gRPC layer uses to
// encode measurement name and field name as tag
pub trait SpecialTagKeys {
/// Return true if this tag key actually refers to a measurement
/// name (e.g. _measurement or _m)
fn is_measurement(&self) -> bool;
/// Return true if this tag key actually refers to a field
/// name (e.g. _field or _f)
fn is_field(&self) -> bool;
}
impl SpecialTagKeys for Vec<u8> {
fn is_measurement(&self) -> bool {
self.as_slice() == TAG_KEY_MEASUREMENT
}
/// Return true if this tag key actually refers to a field
/// name (e.g. _field or _f)
fn is_field(&self) -> bool {
self.as_slice() == TAG_KEY_FIELD
}
}
// Note that is_field can *NEVER* return true for a `String` because 0xff
// is not a valid UTF-8 character, and thus can not be a valid Rust
// String.
// converts a Node from the RPC layer into a datafusion logical expr
fn convert_node_to_expr(node: RPCNode) -> Result<Expr> {
let RPCNode {
children,
node_type: _,
value,
} = node;
let inputs = children
.into_iter()
.map(convert_node_to_expr)
.collect::<Result<Vec<_>>>()?;
let value = value.expect("Normalization removed all None values");
build_node(value, inputs)
}
fn make_tag_name(tag_name: Vec<u8>) -> Result<String> {
// These should have been handled at a higher level -- if we get
// here it is too late
if tag_name.is_measurement() {
InternalInvalidMeasurementReference.fail()
} else if tag_name.is_field() {
InternalInvalidFieldReference.fail()
} else {
String::from_utf8(tag_name).context(ConvertingTagName)
}
}
// Builds an Expr given the Value and the converted children
fn build_node(value: RPCValue, inputs: Vec<Expr>) -> Result<Expr> {
// Only logical / comparison ops can have inputs.
let can_have_children = matches!(&value, RPCValue::Logical(_) | RPCValue::Comparison(_));
if !can_have_children && !inputs.is_empty() {
return UnexpectedChildren { value }.fail();
}
match value {
RPCValue::StringValue(s) => Ok(lit(s)),
RPCValue::BoolValue(b) => Ok(lit(b)),
RPCValue::IntValue(v) => Ok(lit(v)),
RPCValue::UintValue(v) => Ok(lit(v)),
RPCValue::FloatValue(f) => Ok(lit(f)),
RPCValue::RegexValue(regexp) => RegExpLiteralNotSupported { regexp }.fail(),
RPCValue::TagRefValue(tag_name) => Ok(col(&make_tag_name(tag_name)?)),
RPCValue::FieldRefValue(field_name) => Ok(col(&field_name)),
RPCValue::Logical(logical) => build_logical_node(logical, inputs),
RPCValue::Comparison(comparison) => build_comparison_node(comparison, inputs),
}
}
/// Creates an expr from a "Logical" Node
fn build_logical_node(logical: i32, inputs: Vec<Expr>) -> Result<Expr> {
// This ideally could be a match, but I couldn't find a safe way
// to match an i32 to RPCLogical except for this
if logical == RPCLogical::And as i32 {
build_binary_expr(Operator::And, inputs)
} else if logical == RPCLogical::Or as i32 {
build_binary_expr(Operator::Or, inputs)
} else {
UnknownLogicalNode { logical }.fail()
}
}
/// Creates an expr from a "Comparsion" Node
fn build_comparison_node(comparison: i32, inputs: Vec<Expr>) -> Result<Expr> {
// again, this would ideally be a match but I couldn't figure out how to
// match an i32 to the enum values
if comparison == RPCComparison::Equal as i32 {
build_binary_expr(Operator::Eq, inputs)
} else if comparison == RPCComparison::NotEqual as i32 {
build_binary_expr(Operator::NotEq, inputs)
} else if comparison == RPCComparison::StartsWith as i32 {
StartsWithNotSupported {}.fail()
} else if comparison == RPCComparison::Regex as i32 {
RegExpNotSupported {}.fail()
} else if comparison == RPCComparison::NotRegex as i32 {
NotRegExpNotSupported {}.fail()
} else if comparison == RPCComparison::Lt as i32 {
build_binary_expr(Operator::Lt, inputs)
} else if comparison == RPCComparison::Lte as i32 {
build_binary_expr(Operator::LtEq, inputs)
} else if comparison == RPCComparison::Gt as i32 {
build_binary_expr(Operator::Gt, inputs)
} else if comparison == RPCComparison::Gte as i32 {
build_binary_expr(Operator::GtEq, inputs)
} else {
UnknownComparisonNode { comparison }.fail()
}
}
/// Creates a datafusion binary expression with the specified operator
fn build_binary_expr(op: Operator, inputs: Vec<Expr>) -> Result<Expr> {
// convert input vector to options so we can "take" elements out of it
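// (Expr is not Copy, and elements cannot be moved out of a Vec by index,
// so wrapping each child in an Option lets us move it out exactly once)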
let mut inputs = inputs.into_iter().map(Some).collect::<Vec<_>>();
let num_children = inputs.len();
match num_children {
2 => Ok(binary_expr(
inputs[0].take().unwrap(),
op,
inputs[1].take().unwrap(),
)),
_ => UnsupportedNumberOfChildren { op, num_children }.fail(),
}
}
pub fn make_read_group_aggregate(
aggregate: Option<RPCAggregate>,
group: RPCGroup,
group_keys: Vec<String>,
) -> Result<GroupByAndAggregate> {
// validate Group setting
match group {
// Group:None is invalid if grouping keys are specified
RPCGroup::None if !group_keys.is_empty() => InvalidGroupNone {
num_group_keys: group_keys.len(),
}
.fail(),
// Group:By is invalid if no grouping keys are specified
RPCGroup::By if group_keys.is_empty() => InvalidGroupBy {}.fail(),
_ => Ok(()),
}?;
let gby_agg = GroupByAndAggregate::Columns {
agg: convert_aggregate(aggregate)?,
group_columns: group_keys,
};
Ok(gby_agg)
}
/// Builds GroupByAndAggregate::Windows
pub fn make_read_window_aggregate(
aggregates: Vec<RPCAggregate>,
window_every: i64,
offset: i64,
window: Option<RPCWindow>,
) -> Result<GroupByAndAggregate> {
// only support single aggregate for now
if aggregates.len() != 1 {
return AggregateNotSingleton { aggregates }.fail();
}
let agg = convert_aggregate(aggregates.into_iter().next())?;
// Translation from these parameters to window bounds
// is defined in the Go code:
// https://github.com/influxdata/idpe/pull/8636/files#diff-94c0a8d7e427e2d7abe49f01dced50ad776b65ec8f2c8fb2a2c8b90e2e377ed5R82
//
// Quoting:
//
// Window and the WindowEvery/Offset should be mutually
// exclusive. If you set either the WindowEvery or Offset with
// nanosecond values, then the Window will be ignored
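//
// For example (illustrative): window_every = 60_000_000_000 (60s in
// nanoseconds) with offset = 0 yields fixed 60-second windows, and any
// `window` struct passed alongside is ignored with a warning.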
let (every, offset) = match (window, window_every, offset) {
(None, 0, 0) => return EmptyWindow {}.fail(),
(Some(window), 0, 0) => (
convert_duration(window.every, DurationValidation::ForbidZero).map_err(|e| {
Error::InvalidWindowEveryDuration {
description: e.into(),
}
})?,
convert_duration(window.offset, DurationValidation::AllowZero).map_err(|e| {
Error::InvalidWindowOffsetDuration {
description: e.into(),
}
})?,
),
(window, window_every, offset) => {
// warn if window is being ignored
if window.is_some() {
warn!("window_every {} or offset {} was non zero, so ignoring window specification '{:?}' on read_window_aggregate",
window_every, offset, window);
}
(
WindowDuration::from_nanoseconds(window_every),
WindowDuration::from_nanoseconds(offset),
)
}
};
Ok(GroupByAndAggregate::Window { agg, every, offset })
}
enum DurationValidation {
/// Zero windows are allowed
AllowZero,
/// Zero windows are not allowed
ForbidZero,
}
/// Convert the RPC input to an IOx WindowDuration
/// structure. `zero_validation` specifies what to do if the window is empty
fn convert_duration(
duration: Option<RPCDuration>,
zero_validation: DurationValidation,
) -> Result<WindowDuration, &'static str> {
let duration = duration.ok_or("No duration specified in RPC")?;
match (duration.nsecs, duration.months, zero_validation) {
// Same error as Go code: https://github.com/influxdata/flux/blob/master/execute/window.go#L36
(0, 0, DurationValidation::ForbidZero) => {
Err("duration used as an interval cannot be zero")
}
(0, 0, DurationValidation::AllowZero) => Ok(WindowDuration::empty()),
(nsecs, 0, _) => Ok(WindowDuration::from_nanoseconds(nsecs)),
(0, _, _) => Ok(WindowDuration::from_months(
duration.months,
duration.negative,
)),
(_, _, _) => Err("duration used as an interval cannot mix month and nanosecond units"),
}
}
fn convert_aggregate(aggregate: Option<RPCAggregate>) -> Result<QueryAggregate> {
let aggregate = match aggregate {
None => return EmptyAggregate {}.fail(),
Some(aggregate) => aggregate,
};
let aggregate_type = aggregate.r#type;
if aggregate_type == RPCAggregateType::None as i32 {
Ok(QueryAggregate::None)
} else if aggregate_type == RPCAggregateType::Sum as i32 {
Ok(QueryAggregate::Sum)
} else if aggregate_type == RPCAggregateType::Count as i32 {
Ok(QueryAggregate::Count)
} else if aggregate_type == RPCAggregateType::Min as i32 {
Ok(QueryAggregate::Min)
} else if aggregate_type == RPCAggregateType::Max as i32 {
Ok(QueryAggregate::Max)
} else if aggregate_type == RPCAggregateType::First as i32 {
Ok(QueryAggregate::First)
} else if aggregate_type == RPCAggregateType::Last as i32 {
Ok(QueryAggregate::Last)
} else if aggregate_type == RPCAggregateType::Mean as i32 {
Ok(QueryAggregate::Mean)
} else {
UnknownAggregate { aggregate_type }.fail()
}
}
pub fn convert_group_type(group: i32) -> Result<RPCGroup> {
if group == RPCGroup::None as i32 {
Ok(RPCGroup::None)
} else if group == RPCGroup::By as i32 {
Ok(RPCGroup::By)
} else {
UnknownGroup { group_type: group }.fail()
}
}
/// Creates a representation of some struct (in another crate that we
/// don't control) suitable for logging with `std::fmt::Display`)
pub trait Loggable<'a> {
fn loggable(&'a self) -> Box<dyn fmt::Display + 'a>;
}
impl<'a> Loggable<'a> for Option<RPCPredicate> {
fn loggable(&'a self) -> Box<dyn fmt::Display + 'a> {
Box::new(displayable_predicate(self.as_ref()))
}
}
impl<'a> Loggable<'a> for RPCPredicate {
fn loggable(&'a self) -> Box<dyn fmt::Display + 'a> {
Box::new(displayable_predicate(Some(self)))
}
}
/// Returns a struct that can format gRPC predicate (aka `RPCPredicates`) for
/// Display
///
/// For example:
/// let pred = RPCPredicate(...);
/// println!("The predicate is {}", displayable_predicate(Some(&pred)));
pub fn displayable_predicate(pred: Option<&RPCPredicate>) -> impl fmt::Display + '_ {
struct Wrapper<'a>(Option<&'a RPCPredicate>);
impl<'a> fmt::Display for Wrapper<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
None => write!(f, "<NONE>"),
Some(pred) => format_predicate(pred, f),
}
}
}
Wrapper(pred)
}
fn format_predicate<'a>(pred: &'a RPCPredicate, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &pred.root {
Some(r) => format_node(r, f),
None => write!(f, "root: <NONE>"),
}
}
fn format_node<'a>(node: &'a RPCNode, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let value = match &node.value {
None => {
write!(f, "node: NONE")?;
return Ok(());
}
Some(value) => value,
};
match node.children.len() {
0 => {
format_value(value, f)?;
}
// print using infix notation
// (child0 <op> child1)
2 => {
write!(f, "(")?;
format_node(&node.children[0], f)?;
write!(f, " ")?;
format_value(value, f)?;
write!(f, " ")?;
format_node(&node.children[1], f)?;
write!(f, ")")?;
}
// print func notation
// <op>(child0, child1, ...)
_ => {
format_value(value, f)?;
write!(f, "(")?;
for (i, child) in node.children.iter().enumerate() {
if i > 0 {
write!(f, ", ")?;
}
format_node(child, f)?;
}
write!(f, ")")?;
}
};
Ok(())
}
fn format_value<'a>(value: &'a RPCValue, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use RPCValue::*;
match value {
StringValue(s) => write!(f, "\"{}\"", s),
BoolValue(b) => write!(f, "{}", b),
IntValue(i) => write!(f, "{}", i),
UintValue(u) => write!(f, "{}", u),
FloatValue(fval) => write!(f, "{}", fval),
RegexValue(r) => write!(f, "RegEx:{}", r),
TagRefValue(bytes) => {
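// the storage protocol encodes measurement and field references as the
// special tag keys 0x00 and 0xff; render those with readable aliases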
let temp = String::from_utf8_lossy(bytes);
let sval = match bytes.as_slice() {
TAG_KEY_MEASUREMENT => "_m[0x00]",
TAG_KEY_FIELD => "_f[0xff]",
_ => &temp,
};
write!(f, "TagRef:{}", sval)
}
FieldRefValue(d) => write!(f, "FieldRef:{}", d),
Logical(v) => format_logical(*v, f),
Comparison(v) => format_comparison(*v, f),
}
}
fn format_logical(v: i32, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if v == RPCLogical::And as i32 { | } else if v == RPCLogical::Or as i32 {
write!(f, "Or")
} else {
write!(f, "UNKNOWN_LOGICAL:{}", v)
}
}
fn format_comparison(v: i32, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if v == RPCComparison::Equal as i32 {
write!(f, "==")
} else if v == RPCComparison::NotEqual as i32 {
write!(f, "!=")
} else if v == RPCComparison::StartsWith as i32 {
write!(f, "StartsWith")
} else if v == RPCComparison::Regex as i32 {
write!(f, "RegEx")
} else if v == RPCComparison::NotRegex as i32 {
write!(f, "NotRegex")
} else if v == RPCComparison::Lt as i32 {
write!(f, "<")
} else if v == RPCComparison::Lte as i32 {
write!(f, "<=")
} else if v == RPCComparison::Gt as i32 {
write!(f, ">")
} else if v == RPCComparison::Gte as i32 {
write!(f, ">=")
} else {
write!(f, "UNKNOWN_COMPARISON:{}", v)
}
}
#[cfg(test)]
mod tests {
use generated_types::node::Type as RPCNodeType;
use std::collections::BTreeSet;
use super::*;
#[test]
fn test_convert_predicate_none() {
let predicate = PredicateBuilder::default()
.rpc_predicate(None)
.unwrap()
.build();
assert!(predicate.exprs.is_empty());
}
#[test]
fn test_convert_predicate_empty() {
let rpc_predicate = RPCPredicate { root: None };
let res = PredicateBuilder::default().rpc_predicate(Some(rpc_predicate));
let expected_error = "Unexpected empty predicate: Node";
let actual_error = error_result_to_string(res);
assert!(
actual_error.contains(expected_error),
"expected '{}' not found in '{}'",
expected_error,
actual_error
);
}
#[test]
fn test_convert_predicate_good() {
let (comparison, expected_expr) = make_host_comparison();
let rpc_predicate = RPCPredicate {
root: Some(comparison),
};
let predicate = PredicateBuilder::default()
.rpc_predicate(Some(rpc_predicate))
.expect("successfully converting predicate")
.build();
assert_eq!(predicate.exprs.len(), 1);
let converted_expr = &predicate.exprs[0];
// compare the expression using their string representations
// as Expr can't be compared directly.
let converted_expr = format!("{:?}", converted_expr);
let expected_expr = format!("{:?}", expected_expr);
assert_eq!(
expected_expr, converted_expr,
"expected '{:#?}' doesn't match actual '{:#?}'",
expected_expr, converted_expr
);
}
#[test]
fn test_convert_predicate_no_children() {
let comparison = RPCNode {
node_type: RPCNodeType::ComparisonExpression as i32,
children: vec![],
value: Some(RPCValue::Comparison(RPCComparison::Gt as i32)),
};
let rpc_predicate = RPCPredicate {
root: Some(comparison),
};
let res = PredicateBuilder::default().rpc_predicate(Some(rpc_predicate));
let expected_error = "Error creating predicate: Unsupported number of children in binary operator Gt: 0 (must be 2)";
let actual_error = error_result_to_string(res);
assert!(
actual_error.contains(expected_error),
"expected '{}' not found in '{}'",
expected_error,
actual_error
);
}
#[test]
fn test_convert_predicate_comparison_bad_values() {
// Send in invalid input to simulate a bad actor
let iconst = RPCNode {
node_type: RPCNodeType::Literal as i32,
children: vec![],
value: Some(RPCValue::FloatValue(5.0)),
};
let comparison = RPCNode {
node_type: RPCNodeType::ComparisonExpression as i32,
children: vec![iconst.clone(), iconst],
value: Some(RPCValue::Comparison(42)), // 42 is not a valid comparison value
};
let rpc_predicate = RPCPredicate {
root: Some(comparison),
};
let res = PredicateBuilder::default().rpc_predicate(Some(rpc_predicate));
let expected_error = "Error creating predicate: Unknown comparison node type: 42";
let actual_error = error_result_to_string(res);
assert!(
actual_error.contains(expected_error),
"expected '{}' not found in '{}'",
expected_error,
actual_error
);
}
#[test]
fn test_convert_predicate_logical_bad_values() {
// Send in invalid input to simulate a bad actor
let iconst = RPCNode {
node_type: RPCNodeType::Literal as i32,
children: vec![],
value: Some(RPCValue::FloatValue(5.0)),
};
let comparison = RPCNode {
node_type: RPCNodeType::LogicalExpression as i32,
children: vec![iconst.clone(), iconst],
value: Some(RPCValue::Logical(42)), // 42 is not a valid logical value
};
let rpc_predicate = RPCPredicate {
root: Some(comparison),
};
let res = PredicateBuilder::default().rpc_predicate(Some(rpc_predicate));
let expected_error = "Error creating predicate: Unknown logical node type: 42";
let actual_error = error_result_to_string(res);
assert!(
actual_error.contains(expected_error),
"expected '{}' not found in '{}'",
expected_error,
actual_error
);
}
#[test]
fn test_convert_predicate_field_selection() {
let field_selection = make_field_ref_node("field1");
let rpc_predicate = RPCPredicate {
root: Some(field_selection),
};
let predicate = PredicateBuilder::default()
.rpc_predicate(Some(rpc_predicate))
.unwrap()
.build();
assert!(predicate.exprs.is_empty());
assert!(predicate.table_names.is_none());
assert_eq!(predicate.field_columns, Some(to_set(&["field1"])));
assert!(predicate.range.is_none());
}
#[test]
fn test_convert_predicate_field_selection_wrapped() {
// test wrapping the whole predicate in a None value (aka what influxql does
// for some reason)
let field_selection = make_field_ref_node("field1");
let wrapped = RPCNode {
node_type: RPCNodeType::ParenExpression as i32,
children: vec![field_selection],
value: None,
};
let rpc_predicate = RPCPredicate {
root: Some(wrapped),
};
let predicate = PredicateBuilder::default()
.rpc_predicate(Some(rpc_predicate))
.unwrap()
.build();
assert!(predicate.exprs.is_empty());
assert!(predicate.table_names.is_none());
assert_eq!(predicate.field_columns, Some(to_set(&["field1"])));
assert!(predicate.range.is_none());
}
#[test]
fn test_convert_predicate_multiple_field_selection() {
let selection = make_or_node(make_field_ref_node("field1"), make_field_ref_node("field2"));
let selection = make_or_node(selection, make_field_ref_node("field3"));
let rpc_predicate = RPCPredicate {
root: Some(selection),
};
let predicate = PredicateBuilder::default()
.rpc_predicate(Some(rpc_predicate))
.unwrap()
.build();
assert!(predicate.exprs.is_empty());
assert!(predicate.table_names.is_none());
assert_eq!(
predicate.field_columns,
Some(to_set(&["field1", "field2", "field3"]))
);
assert!(predicate.range.is_none());
}
// test multiple field restrictions and a general predicate
#[test]
fn test_convert_predicate_multiple_field_selection_and_predicate() {
let (comparison, expected_expr) = make_host_comparison();
let selection = make_or_node(make_field_ref_node("field1"), make_field_ref_node("field2"));
let selection = make_and_node(selection, comparison);
let rpc_predicate = RPCPredicate {
root: Some(selection),
};
let predicate = PredicateBuilder::default()
.rpc_predicate(Some(rpc_predicate))
.unwrap()
.build();
// compare the expression using their string representations
// as Expr can't be compared directly.
assert_eq!(predicate.exprs.len(), 1);
let converted_expr = format!("{:?}", predicate.exprs[0]);
let expected_expr = format!("{:?}", expected_expr);
assert_eq!(
expected_expr, converted_expr,
"expected '{:#?}' doesn't match actual '{:#?}'",
expected_expr, converted_expr
);
assert!(predicate.table_names.is_none());
assert_eq!(predicate.field_columns, Some(to_set(&["field1", "field2"])));
assert!(predicate.range.is_none());
}
#[test]
fn test_convert_predicate_measurement_selection() {
let measurement_selection = make_measurement_ref_node("m1");
let rpc_predicate = RPCPredicate {
root: Some(measurement_selection),
};
let predicate = PredicateBuilder::default()
.rpc_predicate(Some(rpc_predicate))
.unwrap()
.build();
assert!(predicate.exprs.is_empty());
assert_eq!(predicate.table_names, Some(to_set(&["m1"])));
assert!(predicate.field_columns.is_none());
assert!(predicate.range.is_none());
}
#[test]
fn test_convert_predicate_unsupported_structure() {
// Test (_f = "foo" and host > 5.0) OR (_m = "bar")
// which is not something we know how to do
let (comparison, _) = make_host_comparison();
let unsupported = make_or_node(
make_and_node(make_field_ref_node("foo"), comparison),
make_measurement_ref_node("bar"),
);
let rpc_predicate = RPCPredicate {
root: Some(unsupported),
};
let res = PredicateBuilder::default().rpc_predicate(Some(rpc_predicate));
let expected_error = "Internal error: found field tag reference in unexpected location";
let actual_error = error_result_to_string(res);
assert!(
actual_error.contains(expected_error),
"expected '{}' not found in '{}'",
expected_error,
actual_error
);
}
/// make a _f = 'field_name' type node
fn make_field_ref_node(field_name: impl Into<String>) -> RPCNode {
make_tag_ref_node(&[255], field_name)
}
/// make a _m = 'measurement_name' type node
fn make_measurement_ref_node(field_name: impl Into<String>) -> RPCNode {
make_tag_ref_node(&[0], field_name)
}
/// returns (RPCNode, and expected_expr for the "host > 5.0")
fn make_host_comparison() -> (RPCNode, Expr) {
// host > 5.0
let field_ref = RPCNode {
node_type: RPCNodeType::FieldRef as i32,
children: vec![],
value: Some(RPCValue::FieldRefValue(String::from("host"))),
};
let iconst = RPCNode {
node_type: RPCNodeType::Literal as i32,
children: vec![],
value: Some(RPCValue::FloatValue(5.0)),
};
let comparison = RPCNode {
node_type: RPCNodeType::ComparisonExpression as i32,
children: vec![field_ref, iconst],
value: Some(RPCValue::Comparison(RPCComparison::Gt as i32)),
};
let expected_expr = col("host").gt(lit(5.0));
(comparison, expected_expr)
}
fn make_tag_ref_node(tag_name: &[u8], field_name: impl Into<String>) -> RPCNode {
let field_tag_ref_node = RPCNode {
node_type: RPCNodeType::TagRef as i32,
children: vec![],
value: Some(RPCValue::TagRefValue(tag_name.to_vec())),
};
let string_node = RPCNode {
node_type: RPCNodeType::Literal as i32,
children: vec![],
value: Some(RPCValue::StringValue(field_name.into())),
};
RPCNode {
node_type: RPCNodeType::ComparisonExpression as i32,
children: vec![field_tag_ref_node, string_node],
value: Some(RPCValue::Comparison(RPCComparison::Equal as i32)),
}
}
/// make n1 OR n2
fn make_or_node(n1: RPCNode, n2: RPCNode) -> RPCNode {
RPCNode {
node_type: RPCNodeType::LogicalExpression as i32,
children: vec![n1, n2],
value: Some(RPCValue::Logical(RPCLogical::Or as i32)),
}
}
/// make n1 AND n2
fn make_and_node(n1: RPCNode, n2: RPCNode) -> RPCNode {
RPCNode {
node_type: RPCNodeType::LogicalExpression as i32,
children: vec![n1, n2],
value: Some(RPCValue::Logical(RPCLogical::And as i32)),
}
}
fn to_set(v: &[&str]) -> BTreeSet<String> {
v.iter().map(|s| s.to_string()).collect::<BTreeSet<_>>()
}
/// Return the display format of the resulting error, or
/// 'UNEXPECTED SUCCESS' if `res` is not an error.
fn error_result_to_string<R>(res: Result<R>) -> String {
match res {
Ok(_) => "UNEXPECTED SUCCESS".into(),
Err(e) => format!("{}", e),
}
}
#[test]
fn test_make_read_group_aggregate() {
assert_eq!(
make_read_group_aggregate(Some(make_aggregate(1)), RPCGroup::None, vec![]).unwrap(),
GroupByAndAggregate::Columns {
agg: QueryAggregate::Sum,
group_columns: vec![]
}
);
assert_eq!(
make_read_group_aggregate(Some(make_aggregate(1)), RPCGroup::By, vec!["gcol".into()])
.unwrap(),
GroupByAndAggregate::Columns {
agg: QueryAggregate::Sum,
group_columns: vec!["gcol".into()]
}
);
// error cases
assert_eq!(
make_read_group_aggregate(None, RPCGroup::None, vec![])
.unwrap_err()
.to_string(),
"Error creating aggregate: Unexpected empty aggregate"
);
assert_eq!(
make_read_group_aggregate(Some(make_aggregate(1)), RPCGroup::None, vec!["gcol".into()])
.unwrap_err()
.to_string(),
"Incompatible read_group request: Group::None had 1 group keys (expected 0)"
);
assert_eq!(
make_read_group_aggregate(Some(make_aggregate(1)), RPCGroup::By, vec![])
.unwrap_err()
.to_string(),
"Incompatible read_group request: Group::By had no group keys (expected at least 1)"
);
}
#[test]
fn test_make_read_window_aggregate() {
let pos_5_ns = WindowDuration::from_nanoseconds(5);
let pos_10_ns = WindowDuration::from_nanoseconds(10);
let pos_3_months = WindowDuration::from_months(3, false);
let neg_1_months = WindowDuration::from_months(1, true);
let agg = make_read_window_aggregate(vec![], 5, 10, None);
let expected =
"Error creating aggregate: Exactly one aggregate is supported, but 0 were supplied: []";
assert_eq!(error_result_to_string(agg), expected);
let agg =
make_read_window_aggregate(vec![make_aggregate(1), make_aggregate(2)], 5, 10, None);
let expected = "Error creating aggregate: Exactly one aggregate is supported, but 2 were supplied: [Aggregate { r#type: Sum }, Aggregate { r#type: Count }]";
assert_eq!(error_result_to_string(agg), expected);
// now window specified
let agg = make_read_window_aggregate(vec![make_aggregate(1)], 0, 0, None);
let expected = "Error parsing window bounds: No window specified";
assert_eq!(error_result_to_string(agg), expected);
// correct window + window_every
let agg = make_read_window_aggregate(vec![make_aggregate(1)], 5, 10, None).unwrap();
let expected = make_storage_window(QueryAggregate::Sum, &pos_5_ns, &pos_10_ns);
assert_eq!(agg, expected);
// correct every + offset
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(5, 0, false, 10, 0, false)),
)
.unwrap();
let expected = make_storage_window(QueryAggregate::Sum, &pos_5_ns, &pos_10_ns);
assert_eq!(agg, expected);
// correct every + zero offset
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(5, 0, false, 0, 0, false)),
)
.unwrap();
let expected =
make_storage_window(QueryAggregate::Sum, &pos_5_ns, &WindowDuration::empty());
assert_eq!(agg, expected);
// correct every + offset in months
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(0, 3, false, 0, 1, true)),
)
.unwrap();
let expected = make_storage_window(QueryAggregate::Sum, &pos_3_months, &neg_1_months);
assert_eq!(agg, expected);
// correct every + offset in months
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(0, 1, true, 0, 3, false)),
)
.unwrap();
let expected = make_storage_window(QueryAggregate::Sum, &neg_1_months, &pos_3_months);
assert_eq!(agg, expected);
// both window + window_every and every + offset -- every + offset overrides
// (100 and 200 should be ignored)
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
5,
10,
Some(make_rpc_window(100, 0, false, 200, 0, false)),
)
.unwrap();
let expected = make_storage_window(QueryAggregate::Sum, &pos_5_ns, &pos_10_ns);
assert_eq!(agg, expected);
// invalid durations
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(5, 1, false, 10, 0, false)),
);
let expected = "Error parsing window bounds duration \'window.every\': duration used as an interval cannot mix month and nanosecond units";
assert_eq!(error_result_to_string(agg), expected);
// invalid durations
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(5, 0, false, 10, 1, false)),
);
let expected = "Error parsing window bounds duration \'window.offset\': duration used as an interval cannot mix month and nanosecond units";
assert_eq!(error_result_to_string(agg), expected);
// invalid durations
let agg = make_read_window_aggregate(
vec![make_aggregate(1)],
0,
0,
Some(make_rpc_window(0, 0, false, 5, 0, false)),
);
let expected = "Error parsing window bounds duration \'window.every\': duration used as an interval cannot be zero";
assert_eq!(error_result_to_string(agg), expected);
}
#[test]
fn test_convert_group_type() {
assert_eq!(convert_group_type(0).unwrap(), RPCGroup::None);
assert_eq!(convert_group_type(2).unwrap(), RPCGroup::By);
assert_eq!(
error_result_to_string(convert_group_type(1)),
"Error creating aggregate: Unknown group type: 1"
);
}
#[test]
fn test_convert_aggregate() {
assert_eq!(
error_result_to_string(convert_aggregate(None)),
"Error creating aggregate: Unexpected empty aggregate"
);
assert_eq!(
convert_aggregate(make_aggregate_opt(0)).unwrap(),
QueryAggregate::None
);
assert_eq!(
convert_aggregate(make_aggregate_opt(1)).unwrap(),
QueryAggregate::Sum
);
assert_eq!(
convert_aggregate(make_aggregate_opt(2)).unwrap(),
QueryAggregate::Count
);
assert_eq!(
convert_aggregate(make_aggregate_opt(3)).unwrap(),
QueryAggregate::Min
);
assert_eq!(
convert_aggregate(make_aggregate_opt(4)).unwrap(),
QueryAggregate::Max
);
assert_eq!(
convert_aggregate(make_aggregate_opt(5)).unwrap(),
QueryAggregate::First
);
assert_eq!(
convert_aggregate(make_aggregate_opt(6)).unwrap(),
QueryAggregate::Last
);
assert_eq!(
convert_aggregate(make_aggregate_opt(7)).unwrap(),
QueryAggregate::Mean
);
assert_eq!(
error_result_to_string(convert_aggregate(make_aggregate_opt(100))),
"Error creating aggregate: Unknown aggregate type 100"
);
}
fn make_aggregate(t: i32) -> RPCAggregate {
RPCAggregate { r#type: t }
}
fn make_aggregate_opt(t: i32) -> Option<RPCAggregate> {
Some(make_aggregate(t))
}
fn make_rpc_window(
every_nsecs: i64,
every_months: i64,
every_negative: bool,
offset_nsecs: i64,
offset_months: i64,
offset_negative: bool,
) -> RPCWindow {
RPCWindow {
every: Some(RPCDuration {
nsecs: every_nsecs,
months: every_months,
negative: every_negative,
}),
offset: Some(RPCDuration {
nsecs: offset_nsecs,
months: offset_months,
negative: offset_negative,
}),
}
}
fn make_storage_window(
agg: QueryAggregate,
every: &WindowDuration,
offset: &WindowDuration,
) -> GroupByAndAggregate {
GroupByAndAggregate::Window {
agg,
every: every.clone(),
offset: offset.clone(),
}
}
#[test]
fn test_displayable_predicate_none() {
let rpc_pred = None;
assert_eq!(
"<NONE>",
format!("{}", displayable_predicate(rpc_pred.as_ref()))
);
}
#[test]
fn test_displayable_predicate_root_none() {
let rpc_pred = Some(RPCPredicate { root: None });
assert_eq!(
"root: <NONE>",
format!("{}", displayable_predicate(rpc_pred.as_ref()))
);
}
#[test]
fn test_displayable_predicate_two_args() {
let (comparison, _) = make_host_comparison();
let rpc_pred = Some(RPCPredicate {
root: Some(comparison),
});
assert_eq!(
"(FieldRef:host > 5)",
format!("{}", displayable_predicate(rpc_pred.as_ref()))
);
}
#[test]
fn test_displayable_predicate_three_args() {
// Make one with more than two children (not sure if this ever happens)
let node = RPCNode {
node_type: RPCNodeType::LogicalExpression as i32,
children: vec![
make_tag_ref_node(b"tag1", "val1"),
make_tag_ref_node(b"tag2", "val2"),
make_tag_ref_node(b"tag3", "val3"),
],
value: Some(RPCValue::Logical(RPCLogical::And as i32)),
};
let rpc_pred = Some(RPCPredicate { root: Some(node) });
assert_eq!(
"AND((TagRef:tag1 == \"val1\"), (TagRef:tag2 == \"val2\"), (TagRef:tag3 == \"val3\"))",
format!("{}", displayable_predicate(rpc_pred.as_ref()))
);
}
#[test]
fn test_displayable_predicate_measurement_and_field() {
// Make one with more than two children (not sure if this ever happens)
let node = RPCNode {
node_type: RPCNodeType::LogicalExpression as i32,
children: vec![
make_tag_ref_node(&[0], "val1"),
make_tag_ref_node(b"tag2", "val2"),
make_tag_ref_node(&[255], "val3"),
],
value: Some(RPCValue::Logical(RPCLogical::And as i32)),
};
let rpc_pred = Some(RPCPredicate { root: Some(node) });
assert_eq!(
"AND((TagRef:_m[0x00] == \"val1\"), (TagRef:tag2 == \"val2\"), (TagRef:_f[0xff] == \"val3\"))",
format!("{}", displayable_predicate(rpc_pred.as_ref()))
);
}
} | write!(f, "AND") |
middle.go | package middle
import (
"log"
"net/http"
)
| func Log(w http.ResponseWriter, r *http.Request) {
log.Println(r.RequestURI)
} |
|
webpack.config.js | const path = require('path')
const HTMLPlugin = require('html-webpack-plugin')
const webpack = require('webpack')
const ExtractPlugin = require('extract-text-webpack-plugin')
const isDev = process.env.NODE_ENV === 'development'
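// NODE_ENV selects between the dev-server setup and the production build config below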
const config = {
target: 'web',
entry: path.join(__dirname, 'src/index.js'),
output: {
filename: 'bundle.[hash:8].js',
path: path.join(__dirname, 'dist')
},
module: {
rules: [
{
test: /\.vue$/,
loader: 'vue-loader'
},
{
test: /\.jsx$/,
loader: 'babel-loader'
},
{
test: /\.(gif|jpg|jpeg|png|svg)$/,
use: [
{
loader: 'url-loader', // url-loader is a wrapper around file-loader
options: {
limit: 1024, // images smaller than 1024 bytes are inlined as base64
name: '[name].[ext]'
}
}
]
}
]
},
plugins: [ | }),
new HTMLPlugin()
],
// externals: {
// 'vue': 'Vue'
// }
}
if (isDev) {
// in development, CSS can be inlined
config.module.rules.push({
test: /\.styl$/,
use: [
'style-loader',
'css-loader',
{
loader: 'postcss-loader',
options: {
sourceMap: true // stylus-loader generates a sourceMap and so does postcss-loader; this option reuses the one already generated, which speeds up compilation
}
},
'stylus-loader'
]
})
config.devtool = '#cheap-module-eval-source-map'
// devServer is only available since webpack 2.0
config.devServer = {
port: 8000,
// host: '127.0.0.1', // local-only access
host: '0.0.0.0', // benefit: reachable from other machines or a phone via this machine's IP
overlay: { // show compilation errors as an overlay on the page
errors: true
},
hot: true, // hot module replacement: only changed modules are updated, without a full page reload
// open: true // automatically open the browser
}
// plugins required for hot module replacement
config.plugins.push(
new webpack.HotModuleReplacementPlugin(),
new webpack.NoEmitOnErrorsPlugin()
)
} else {
// third-party libraries are relatively stable and change far less often than app code, so bundle them separately
config.entry = {
app: path.join(__dirname, 'src/index.js'),
vendor: ['vue']
}
// with plain [hash], every emitted file shares the same hash per build, so each
// production build would also rename vendor and force it to be re-downloaded,
// defeating the point of splitting it out; [chunkhash] gives each chunk its own hash
config.output.filename = '[name].[chunkhash:8].js'
// in production, CSS should be extracted into external files
config.module.rules.push({
test: /\.styl$/,
use: ExtractPlugin.extract({
fallback: 'style-loader',
use: [
'css-loader',
{
loader: 'postcss-loader',
options: {
sourceMap: true
}
},
'stylus-loader'
]
})
})
config.plugins.push(
new ExtractPlugin('styles.[contentHash:8].css'), // CSS extraction plugin
new webpack.optimize.CommonsChunkPlugin({ // bundle third-party libraries separately into vendor.js
name: 'vendor'
}),
// FIXME: still not entirely clear on this part; revisit https://www.imooc.com/video/16410
new webpack.optimize.CommonsChunkPlugin({
name: 'runtime'
})
)
}
module.exports = config | new webpack.DefinePlugin({
'process.env': {
NODE_ENV: isDev ? '"development"' : '"production"'
} |
example2.js | switch (prompt("What is the weather like?")) {
case "rainy":
console.log("Remember to bring an umbrella.");
break;
case "sunny":
console.log("Dress lightly."); | console.log("Go outside");
break;
default:
console.log("Unknown weather type");
break;
} | case "cloudy": |
metadata.rs | use super::cache::GlobalCache;
use super::dir::DirEntry;
use super::dist::server::CacheServer;
use super::fs_util;
use super::node::{self, DefaultNode, Node};
use super::RenameParam;
use crate::async_fuse::fuse::protocol::{FuseAttr, INum, FUSE_ROOT_ID};
use crate::async_fuse::util;
use crate::common::etcd_delegate::EtcdDelegate;
use anyhow::Context;
use async_trait::async_trait;
use log::debug;
use nix::errno::Errno;
use nix::fcntl::OFlag;
use nix::sys::stat::SFlag;
use nix::unistd;
use smol::lock::{RwLock, RwLockWriteGuard};
use std::collections::{BTreeMap, BTreeSet};
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use utilities::Cast;
/// The time-to-live seconds of FUSE attributes
const MY_TTL_SEC: u64 = 3600; // TODO: should be a long value, say 1 hour
/// The generation ID of FUSE attributes
const MY_GENERATION: u64 = 1; // TODO: find a proper way to set generation
/// MetaData of fs
#[async_trait]
pub trait MetaData {
/// Node type
type N: Node + Send + Sync + 'static;
/// Create `MetaData`
async fn new(
root_path: &str,
capacity: usize,
ip: &str,
port: &str,
etcd_client: EtcdDelegate,
node_id: &str,
volume_info: &str,
) -> (Arc<Self>, Option<CacheServer>);
/// Helper function to create node
async fn create_node_helper(
&self,
parent: INum,
node_name: &str,
mode: u32,
node_type: SFlag,
target_path: Option<&Path>,
) -> anyhow::Result<(Duration, FuseAttr, u64)>;
/// Helper function to remove node
async fn remove_node_helper(
&self,
parent: INum,
node_name: &str,
node_type: SFlag,
) -> anyhow::Result<()>;
/// Helper function to lookup
async fn lookup_helper(
&self,
parent: INum,
name: &str,
) -> anyhow::Result<(Duration, FuseAttr, u64)>;
/// Rename helper to exchange on disk
async fn rename_exchange_helper(&self, param: RenameParam) -> anyhow::Result<()>;
/// Rename helper to move on disk, it may replace destination entry
async fn rename_may_replace_helper(&self, param: RenameParam) -> anyhow::Result<()>;
/// Helper function of fsync
async fn fsync_helper(&self, ino: u64, fh: u64, datasync: bool) -> anyhow::Result<()>;
// TODO: Should hide this implementation detail
/// Get metadata cache
fn cache(&self) -> &RwLock<BTreeMap<INum, Self::N>>;
/// Delete cache in trash if necessary
async fn delete_trash(&self, ino: INum) -> bool;
/// Helper function to write data
async fn write_helper(
&self,
ino: u64,
fh: u64,
offset: i64,
data: Vec<u8>,
flags: u32,
) -> anyhow::Result<usize>;
}
/// File system in-memory meta-data
#[derive(Debug)]
pub struct DefaultMetaData {
/// The root path and the mount point of the FUSE filesystem
root_path: PathBuf,
/// The cache to hold opened directories and files
pub(crate) cache: RwLock<BTreeMap<INum, DefaultNode>>,
/// The trash to hold deferred deleted directories and files
trash: RwLock<BTreeSet<INum>>,
/// Global data cache
data_cache: Arc<GlobalCache>,
}
#[async_trait]
impl MetaData for DefaultMetaData {
type N = DefaultNode;
async fn new(
root_path: &str,
capacity: usize,
_: &str,
_: &str,
_: EtcdDelegate,
_: &str,
_: &str,
) -> (Arc<Self>, Option<CacheServer>) {
let root_path = Path::new(root_path)
.canonicalize()
.with_context(|| format!("failed to canonicalize the mount path={:?}", root_path))
.unwrap_or_else(|e| panic!("{}", e));
let root_path = root_path
.as_os_str()
.to_str()
.unwrap_or_else(|| panic!("failed to convert to utf8 string"));
let meta = Arc::new(Self {
root_path: root_path.into(),
cache: RwLock::new(BTreeMap::new()),
trash: RwLock::new(BTreeSet::new()),
data_cache: Arc::new(GlobalCache::new_with_capacity(capacity)),
});
let root_inode =
DefaultNode::open_root_node(FUSE_ROOT_ID, "/", root_path, Arc::clone(&meta))
.await
.context("failed to open FUSE root node")
.unwrap_or_else(|e| {
panic!("{}", e);
});
meta.cache.write().await.insert(FUSE_ROOT_ID, root_inode);
(meta, None)
}
/// Get metadata cache
fn cache(&self) -> &RwLock<BTreeMap<INum, Self::N>> {
&self.cache
}
/// Delete node from trash
async fn delete_trash(&self, ino: INum) -> bool {
let mut trash = self.trash.write().await;
if trash.contains(&ino) {
// deferred deletion
trash.remove(&ino);
let mut cache = self.cache.write().await;
let deleted_node = cache.remove(&ino).unwrap_or_else(|| {
panic!(
"forget() found fs is inconsistent, i-node of ino={} \
found in trash, but no i-node found for deferred deletion",
ino,
);
});
debug_assert_eq!(deleted_node.get_lookup_count(), 0);
debug!(
"forget() deferred deleted i-node of ino={} and name={:?}",
ino,
deleted_node.get_name(),
);
return true;
}
false
}
/// Helper function to create node
async fn create_node_helper(
&self,
parent: INum,
node_name: &str,
mode: u32,
node_type: SFlag,
target_path: Option<&Path>,
) -> anyhow::Result<(Duration, FuseAttr, u64)> {
// pre-check
let mut cache = self.cache.write().await;
let parent_node = self
.create_node_pre_check(parent, node_name, &mut cache)
.await
.context("create_node_helper() failed to pre check")?;
let parent_name = parent_node.get_name().to_owned();
// all checks are passed, ready to create new node
let m_flags = fs_util::parse_mode(mode);
let new_ino: u64;
let new_node = match node_type {
SFlag::S_IFDIR => {
debug!(
"create_node_helper() about to create a sub-directory with name={:?} and mode={:?} \
under parent directory of ino={} and name={:?}",
node_name, m_flags, parent, parent_name,
);
parent_node
.create_child_dir(node_name, m_flags)
.await
.context(format!(
"create_node_helper() failed to create directory with name={:?} and mode={:?} \
under parent directory of ino={} and name={:?}",
node_name, m_flags, parent, parent_name,
))?
}
SFlag::S_IFREG => {
let o_flags = OFlag::O_CREAT | OFlag::O_EXCL | OFlag::O_RDWR;
debug!(
"helper_create_node() about to \
create a file with name={:?}, oflags={:?}, mode={:?} \
under parent directory of ino={} and name={:?}",
node_name, o_flags, m_flags, parent, parent_name,
);
parent_node
.create_child_file(
node_name,
o_flags,
m_flags,
Arc::<GlobalCache>::clone(&self.data_cache),
)
.await
.context(format!(
"create_node_helper() failed to create file with name={:?} and mode={:?} \
under parent directory of ino={} and name={:?}",
node_name, m_flags, parent, parent_name,
))?
}
SFlag::S_IFLNK => {
debug!(
"create_node_helper() about to \
create a symlink with name={:?} to target path={:?} \
under parent directory of ino={} and name={:?}",
node_name, target_path, parent, parent_name
);
parent_node
.create_child_symlink(
node_name,
target_path.unwrap_or_else(|| panic!(
"create_node_helper() failed to \
get target path when create symlink with name={:?} \
under parent directory of ino={} and name={:?}",
node_name, parent, parent_node.get_name(),
)).to_owned(),
)
.await
.context(format!(
"create_node_helper() failed to create symlink with name={:?} to target path={:?} \
under parent directory of ino={} and name={:?}",
node_name, target_path, parent, parent_name,
))?
}
_ => {
panic!(
"create_node_helper() found unsupported i-node type={:?} with name={:?} to create \
under parent directory of ino={} and name={:?}",
node_type, node_name, parent, parent_name,
);
}
};
new_ino = new_node.get_ino();
let new_node_attr = new_node.get_attr();
cache.insert(new_ino, new_node);
let ttl = Duration::new(MY_TTL_SEC, 0);
let fuse_attr = fs_util::convert_to_fuse_attr(new_node_attr);
debug!(
"create_node_helper() successfully created the new child name={:?} \
of ino={} and type={:?} under parent ino={} and name={:?}",
node_name, new_ino, node_type, parent, parent_name,
);
Ok((ttl, fuse_attr, MY_GENERATION))
}
/// Helper function to remove node
async fn remove_node_helper(
&self,
parent: INum,
node_name: &str,
node_type: SFlag,
) -> anyhow::Result<()> {
let node_ino: INum;
{
// pre-checks
let cache = self.cache.read().await;
let parent_node = cache.get(&parent).unwrap_or_else(|| {
panic!(
"remove_node_helper() found fs is inconsistent, \
parent of ino={} should be in cache before remove its child",
parent,
);
});
match parent_node.get_entry(node_name) {
None => {
debug!(
"remove_node_helper() failed to find i-node name={:?} \
under parent of ino={}",
node_name, parent,
);
return util::build_error_result_from_errno(
Errno::ENOENT,
format!(
"remove_node_helper() failed to find i-node name={:?} \
under parent of ino={}",
node_name, parent,
),
);
}
Some(child_entry) => {
node_ino = child_entry.ino();
if let SFlag::S_IFDIR = node_type {
// check the directory to delete is empty
let dir_node = cache.get(&node_ino).unwrap_or_else(|| {
panic!(
"remove_node_helper() found fs is inconsistent, \
directory name={:?} of ino={} \
found under the parent of ino={}, \
but no i-node found for this directory",
node_name, node_ino, parent,
);
});
if !dir_node.is_node_data_empty() {
debug!(
"remove_node_helper() cannot remove \
the non-empty directory name={:?} of ino={} \
under the parent directory of ino={}",
node_name, node_ino, parent,
);
return util::build_error_result_from_errno(
Errno::ENOTEMPTY,
format!(
"remove_node_helper() cannot remove \
the non-empty directory name={:?} of ino={} \
under the parent directory of ino={}",
node_name, node_ino, parent,
),
);
}
}
let child_inode = cache.get(&node_ino).unwrap_or_else(|| {
panic!(
"remove_node_helper() found fs is inconsistent, \
i-node name={:?} of ino={} found under the parent of ino={}, \
but no i-node found for this node",
node_name, node_ino, parent
)
});
debug_assert_eq!(node_ino, child_inode.get_ino());
debug_assert_eq!(node_name, child_inode.get_name());
debug_assert_eq!(parent, child_inode.get_parent_ino());
debug_assert_eq!(node_type, child_inode.get_type());
debug_assert_eq!(node_type, child_inode.get_attr().kind);
}
}
}
{
// all checks passed, ready to remove,
// when deferred deletion, remove entry from directory first
self.may_deferred_delete_node_helper(node_ino)
.await
.context(format!(
"remove_node_helper() failed to maybe deferred delete child i-node of ino={}, \
name={:?} and type={:?} under parent ino={}",
node_ino, node_name, node_type, parent,
))?;
// reply.ok().await?;
debug!(
"remove_node_helper() successfully removed child i-node of ino={}, \
name={:?} and type={:?} under parent ino={}",
node_ino, node_name, node_type, parent,
);
Ok(())
}
}
/// Helper function to lookup
async fn lookup_helper(
&self,
parent: INum,
child_name: &str,
) -> anyhow::Result<(Duration, FuseAttr, u64)> {
let pre_check_res = self.lookup_pre_check(parent, child_name).await;
let (ino, child_type) = match pre_check_res {
Ok((ino, child_type)) => (ino, child_type),
Err(e) => {
debug!(
"lookup() failed to pre-check, the error is: {}",
crate::common::util::format_anyhow_error(&e),
);
return Err(e);
}
};
let ttl = Duration::new(MY_TTL_SEC, 0);
{
// cache hit
let cache = self.cache.read().await;
if let Some(node) = cache.get(&ino) {
debug!(
"lookup_helper() cache hit when searching i-node of \
ino={} and name={:?} under parent ino={}",
ino, child_name, parent,
);
let attr = node.lookup_attr();
let fuse_attr = fs_util::convert_to_fuse_attr(attr);
debug!(
"lookup_helper() successfully found in cache the i-node of \
ino={} name={:?} under parent ino={}, the attr={:?}",
ino, child_name, parent, &attr,
);
return Ok((ttl, fuse_attr, MY_GENERATION));
}
}
{
// cache miss
debug!(
"lookup_helper() cache missed when searching parent ino={}
and i-node of ino={} and name={:?}",
parent, ino, child_name,
);
let mut cache = self.cache.write().await;
let parent_node = cache.get_mut(&parent).unwrap_or_else(|| {
panic!(
"lookup_helper() found fs is inconsistent, \
parent i-node of ino={} should be in cache",
parent,
);
});
let parent_name = parent_node.get_name().to_owned();
let child_node = match child_type {
SFlag::S_IFDIR => {
parent_node
.open_child_dir(child_name, None)
.await
.context(format!(
"lookup_helper() failed to open sub-directory name={:?} \
under parent directory of ino={} and name={:?}",
child_name, parent, parent_name,
))?
}
SFlag::S_IFREG => {
let oflags = OFlag::O_RDWR;
parent_node
.open_child_file(
child_name,
None,
oflags,
Arc::<GlobalCache>::clone(&self.data_cache),
)
.await
.context(format!(
"lookup_helper() failed to open child file name={:?} with flags={:?} \
under parent directory of ino={} and name={:?}",
child_name, oflags, parent, parent_name,
))?
}
SFlag::S_IFLNK => parent_node
.load_child_symlink(child_name, None)
.await
.context(format!(
"lookup_helper() failed to read child symlink name={:?} \
under parent directory of ino={} and name={:?}",
child_name, parent, parent_name,
))?,
_ => panic!(
"lookup_helper() found unsupported file type={:?}",
child_type,
),
};
let child_ino = child_node.get_ino();
let attr = child_node.lookup_attr();
cache.insert(child_ino, child_node);
let fuse_attr = fs_util::convert_to_fuse_attr(attr);
debug!(
"lookup_helper() successfully found the i-node of ino={} and name={:?} \
under parent of ino={} and name={:?}",
child_ino, child_name, parent, parent_name,
);
Ok((ttl, fuse_attr, MY_GENERATION))
}
}
/// Rename helper to exchange on disk
async fn rename_exchange_helper(&self, param: RenameParam) -> anyhow::Result<()> {
let old_parent = param.old_parent;
let old_name = param.old_name.as_str();
let new_parent = param.new_parent;
let new_name = param.new_name;
let flags = param.flags;
debug!(
"rename(old parent={}, old name={:?}, new parent={}, new name={:?})",
old_parent, old_name, new_parent, new_name,
);
let no_replace = flags == 1; // RENAME_NOREPLACE
let pre_check_res = self
.rename_pre_check(old_parent, old_name, new_parent, &new_name, no_replace)
.await;
let (_, _, _, new_entry_ino) = match pre_check_res {
Ok((old_parent_fd, old_entry_ino, new_parent_fd, new_entry_ino)) => {
(old_parent_fd, old_entry_ino, new_parent_fd, new_entry_ino)
}
Err(e) => {
debug!(
"rename() pre-check failed, the error is: {}",
crate::common::util::format_anyhow_error(&e)
);
return Err(e);
}
};
let new_entry_ino = new_entry_ino.unwrap_or_else(|| panic!("new entry ino is None"));
let rename_in_cache_res = self
.rename_in_cache_helper(old_parent, old_name, new_parent, &new_name)
.await;
if let Some(replaced_entry) = rename_in_cache_res {
debug_assert_eq!(
new_entry_ino,
replaced_entry.ino(),
"rename_exchange_helper() replaced entry i-number not match"
);
let exchange_entry = DirEntry::new(
new_entry_ino,
old_name.to_owned(),
replaced_entry.entry_type(),
);
// TODO: support thread-safe
let mut cache = self.cache.write().await;
let old_parent_node = cache.get_mut(&old_parent).unwrap_or_else(|| {
panic!(
"impossible case when rename, the from parent i-node of ino={} should be in cache",
old_parent,
)
});
let insert_res = old_parent_node
.insert_entry_for_rename(exchange_entry)
.await;
debug_assert!(
insert_res.is_none(),
"impossible case when rename, the from i-node of name={:?} should have been \
moved out of from parent directory ino={} and name={:?}",
old_name,
old_parent,
old_parent_node.get_name(),
);
// TODO: finish rename exchange when libc::rename2 is available
// call rename2 here to exchange two nodes
let exchanged_node = cache.get_mut(&new_entry_ino).unwrap_or_else(|| {
panic!(
"impossible case when rename, the new entry i-node of ino={} should be in cache",
new_entry_ino,
)
});
exchanged_node.set_parent_ino(old_parent);
exchanged_node.set_name(old_name);
let exchanged_attr = exchanged_node
.load_attribute()
.await
.context(format!(
"rename_exchange_helper() failed to load attribute of \
to i-node of ino={} and name={:?} under parent directory",
new_entry_ino, new_name,
))
.unwrap_or_else(|e| {
panic!(
"rename_exchange_helper() failed to load attributed of to i-node of ino={} and name={:?}, \
the error is: {}",
exchanged_node.get_ino(), exchanged_node.get_name(),
crate::common::util::format_anyhow_error(&e),
)
});
debug_assert_eq!(exchanged_attr.ino, exchanged_node.get_ino());
debug_assert_eq!(exchanged_attr.ino, new_entry_ino);
panic!("rename2 system call has not been supported in libc to exchange two nodes yet!");
} else {
panic!(
"impossible case, the child i-node of name={:?} to be exchanged \
should be under to parent directory ino={}",
new_name, new_parent,
);
}
}
/// Rename helper to move on disk, it may replace destination entry
async fn rename_may_replace_helper(&self, param: RenameParam) -> anyhow::Result<()> {
let old_parent = param.old_parent;
let old_name = param.old_name;
let new_parent = param.new_parent;
let new_name = param.new_name;
let flags = param.flags;
debug!(
"rename(old parent={}, old name={:?}, new parent={}, new name={:?})",
old_parent, old_name, new_parent, new_name,
);
let no_replace = flags == 1; // RENAME_NOREPLACE
let pre_check_res = self
.rename_pre_check(old_parent, &old_name, new_parent, &new_name, no_replace)
.await;
let (old_parent_fd, old_entry_ino, new_parent_fd, new_entry_ino) = match pre_check_res {
Ok((old_parent_fd, old_entry_ino, new_parent_fd, new_entry_ino)) => {
(old_parent_fd, old_entry_ino, new_parent_fd, new_entry_ino)
}
Err(e) => {
debug!(
"rename() pre-check failed, the error is: {}",
crate::common::util::format_anyhow_error(&e)
);
return Err(e);
}
};
// Just replace new entry, may deferred delete
if let Some(new_ino) = new_entry_ino {
self.may_deferred_delete_node_helper(new_ino)
.await
.context(format!(
"rename_may_replace_helper() failed to \
maybe deferred delete the replaced i-node ino={}",
new_ino,
))?;
}
let old_name_clone = old_name.clone();
let new_name_clone = new_name.clone();
// Rename on disk
smol::unblock(move || {
nix::fcntl::renameat(
Some(old_parent_fd),
Path::new(&old_name_clone),
Some(new_parent_fd),
Path::new(&new_name_clone),
)
})
.await
.context(format!(
"rename_may_replace_helper() failed to move the from i-node name={:?} under \
from parent ino={} to the to i-node name={:?} under new parent ino={}",
old_name, old_parent, new_name, new_parent,
))?;
{
let mut cache = self.cache.write().await;
let moved_node = cache.get_mut(&old_entry_ino).unwrap_or_else(|| {
panic!(
"impossible case when rename, the from entry i-node of ino={} should be in cache",
old_entry_ino,
)
});
moved_node.set_parent_ino(new_parent);
moved_node.set_name(&new_name);
let moved_attr = moved_node
.load_attribute()
.await
.context(format!(
"rename_may_replace_helper() failed to \
load attribute of old entry i-node of ino={}",
old_entry_ino,
))
.unwrap_or_else(|e| {
panic!(
"rename() failed, the error is: {}",
crate::common::util::format_anyhow_error(&e)
)
});
debug_assert_eq!(moved_attr.ino, moved_node.get_ino());
debug_assert_eq!(moved_attr.ino, old_entry_ino);
debug!(
"rename_may_replace_helper() successfully moved the from i-node \
of ino={} and name={:?} under from parent ino={} to \
the to i-node of ino={} and name={:?} under to parent ino={}",
old_entry_ino, old_name, old_parent, old_entry_ino, new_name, new_parent,
);
}
let rename_replace_res = self
.rename_in_cache_helper(old_parent, &old_name, new_parent, &new_name)
.await;
debug_assert!(
rename_replace_res.is_none(),
"may_deferred_delete_node_helper() should already have \
deleted the target i-node to be replaced",
);
Ok(())
}
/// Helper function of fsync
async fn fsync_helper(
&self,
ino: u64,
fh: u64,
datasync: bool,
// reply: ReplyEmpty,
) -> anyhow::Result<()> {
// TODO: handle datasync
#[cfg(target_os = "linux")]
{
// attributes are not allowed on if expressions
if datasync {
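// fdatasync flushes file data only, skipping non-essential metadata updates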
smol::unblock(move || unistd::fdatasync(fh.cast()))
.await
.context(format!(
"fsync_helper() failed to flush the i-node of ino={}",
ino
))?;
} else {
smol::unblock(move || unistd::fsync(fh.cast()))
.await
.context(format!(
"fsync_helper() failed to flush the i-node of ino={}",
ino
))?;
}
}
#[cfg(target_os = "macos")]
{
smol::unblock(|| unistd::fsync(fh.cast()))
.await
.context(format!(
"fsync_helper() failed to flush the i-node of ino={}",
ino,
))?;
}
// reply.ok().await?;
debug!(
"fsync_helper() successfully sync the i-node of ino={}, fh={}, datasync={}",
ino, fh, datasync,
);
Ok(())
}
/// Helper function to write data
async fn write_helper(
&self,
ino: u64,
fh: u64,
offset: i64,
data: Vec<u8>,
flags: u32,
) -> anyhow::Result<usize> {
let mut cache = self.cache().write().await;
let inode = cache.get_mut(&ino).unwrap_or_else(|| {
panic!(
"write() found fs is inconsistent, \
the i-node of ino={} should be in cache",
ino,
);
});
debug!(
"write_helper() about to write {} byte data to file of ino={} \
and name {:?} at offset={}",
data.len(),
ino,
inode.get_name(),
offset
);
let o_flags = fs_util::parse_oflag(flags);
let write_to_disk = true;
inode
.write_file(fh, offset, data, o_flags, write_to_disk)
.await
}
}
impl DefaultMetaData {
// FUSE operation helper functions
/// The pre-check before create node
#[allow(single_use_lifetimes)]
async fn create_node_pre_check<'a, 'b>(
&self,
parent: INum,
node_name: &str,
cache: &'b mut RwLockWriteGuard<'a, BTreeMap<INum, <Self as MetaData>::N>>,
) -> anyhow::Result<&'b mut <Self as MetaData>::N> {
let parent_node = cache.get_mut(&parent).unwrap_or_else(|| {
panic!(
"create_node_pre_check() found fs is inconsistent, \
parent of ino={} should be in cache before create it new child",
parent,
);
});
if let Some(occupied) = parent_node.get_entry(node_name) {
debug!(
"create_node_pre_check() found the directory of ino={} and name={:?} \
already exists a child with name={:?} and ino={}",
parent,
parent_node.get_name(),
node_name,
occupied.ino(),
);
return util::build_error_result_from_errno(
Errno::EEXIST,
format!(
"create_node_pre_check() found the directory of ino={} and name={:?} \
already exists a child with name={:?} and ino={}",
parent,
parent_node.get_name(),
node_name,
occupied.ino(),
),
);
}
Ok(parent_node)
}
/// Helper function to delete a node or defer its deletion | async fn may_deferred_delete_node_helper(&self, ino: INum) -> anyhow::Result<()> {
let parent_ino: INum;
let node_name: String;
let mut deferred_deletion = false;
{
// pre-check whether deferred delete or not
let cache = self.cache.read().await;
let node = cache.get(&ino).unwrap_or_else(|| {
panic!(
"may_deferred_delete_node_helper() failed to \
find the i-node of ino={} to remove",
ino,
);
});
parent_ino = node.get_parent_ino();
node_name = node.get_name().to_owned();
debug_assert!(node.get_lookup_count() >= 0); // lookup count cannot be negative
if node.get_lookup_count() > 0 {
// TODO: support thread-safe to avoid race condition
deferred_deletion = true;
}
}
{
// remove entry from parent i-node
let mut cache = self.cache.write().await;
let parent_node = cache.get_mut(&parent_ino).unwrap_or_else(|| {
panic!(
"may_deferred_delete_node_helper() failed to \
find the parent of ino={} for i-node of ino={}",
parent_ino, ino,
);
});
let deleted_entry = parent_node.unlink_entry(&node_name).await.context(format!(
"may_deferred_delete_node_helper() failed to remove entry name={:?} \
and ino={} from parent directory ino={}",
node_name, ino, parent_ino,
))?;
debug_assert_eq!(node_name, deleted_entry.entry_name());
debug_assert_eq!(deleted_entry.ino(), ino);
}
if deferred_deletion {
// deferred deletion
// TODO: support thread-safe
let cache = self.cache.read().await;
let node = cache.get(&ino).unwrap_or_else(|| {
panic!(
"impossible case, may_deferred_delete_node_helper() \
should already find the i-node of ino={} to remove",
ino,
);
});
{
let mut trash = self.trash.write().await;
let insert_result = trash.insert(ino); // check thread-safe in case of deferred deletion race
debug_assert!(
insert_result,
"failed to insert i-node of ino={} into trash for deferred deletion",
ino,
);
}
debug!(
"may_deferred_delete_node_helper() defered removed \
the i-node name={:?} of ino={} under parent ino={}, \
open count={}, lookup count={}",
node.get_name(),
ino,
parent_ino,
node.get_open_count(),
node.get_lookup_count(),
);
} else {
// immediate deletion
let mut cache = self.cache.write().await;
let inode = cache.remove(&ino).unwrap_or_else(|| {
panic!(
"impossible case, may_deferred_delete_node_helper() \
should remove the i-node of ino={} immediately",
ino,
);
}); // TODO: support thread-safe
debug!(
"may_deferred_delete_node_helper() immediately removed \
the i-node name={:?} of ino={} under parent ino={}, \
open count={}, lookup count={}",
inode.get_name(),
ino,
parent_ino,
inode.get_open_count(),
inode.get_lookup_count(),
);
}
Ok(())
}
/// Lookup helper function to pre-check
async fn lookup_pre_check(&self, parent: INum, name: &str) -> anyhow::Result<(INum, SFlag)> {
// lookup child ino and type first
let cache = self.cache.read().await;
let parent_node = cache.get(&parent).unwrap_or_else(|| {
panic!(
"lookup_helper() found fs is inconsistent, \
the parent i-node of ino={} should be in cache",
parent,
);
});
if let Some(child_entry) = parent_node.get_entry(name) {
let ino = child_entry.ino();
let child_type = child_entry.entry_type();
Ok((ino, child_type))
} else {
debug!(
"lookup_helper() failed to find the file name={:?} \
under parent directory of ino={} and name={:?}",
name,
parent,
parent_node.get_name(),
);
// lookup() didn't find anything, this is normal
util::build_error_result_from_errno(
Errno::ENOENT,
format!(
"lookup_helper() failed to find the file name={:?} \
under parent directory of ino={} and name={:?}",
name,
parent,
parent_node.get_name(),
),
)
}
}
/// Rename helper function to pre-check
async fn rename_pre_check(
&self,
old_parent: INum,
old_name: &str,
new_parent: INum,
new_name: &str,
no_replace: bool,
) -> anyhow::Result<(RawFd, INum, RawFd, Option<INum>)> {
let cache = self.cache.read().await;
let old_parent_node = cache.get(&old_parent).unwrap_or_else(|| {
panic!(
"rename() found fs is inconsistent, \
the parent i-node of ino={} should be in cache",
old_parent,
);
});
let old_parent_fd = old_parent_node.get_fd();
let old_entry_ino = match old_parent_node.get_entry(old_name) {
None => {
debug!(
"rename() failed to find child entry of name={:?} under parent directory ino={} and name={:?}",
old_name, old_parent, old_parent_node.get_name(),
);
return util::build_error_result_from_errno(
Errno::ENOENT,
format!(
"rename_pre_check() failed to find child entry of name={:?} \
under parent directory ino={} and name={:?}",
old_name,
old_parent,
old_parent_node.get_name(),
),
);
}
Some(old_entry) => {
debug_assert_eq!(&old_name, &old_entry.entry_name());
if cache.get(&old_entry.ino()).is_none() {
panic!(
"rename() found fs is inconsistent, the i-node of ino={} and name={:?} \
under parent directory of ino={} and name={:?} to rename should be in cache",
old_entry.ino(), old_name, old_parent, old_parent_node.get_name(),
);
// return;
}
old_entry.ino()
}
};
let new_parent_node = cache.get(&new_parent).unwrap_or_else(|| {
panic!(
"rename() found fs is inconsistent, \
the new parent i-node of ino={} should be in cache",
new_parent,
);
});
let new_parent_fd = new_parent_node.get_fd();
let new_entry_ino = if let Some(new_entry) = new_parent_node.get_entry(new_name) {
debug_assert_eq!(&new_name, &new_entry.entry_name());
let new_ino = new_entry.ino();
if cache.get(&new_ino).is_none() {
panic!(
"rename() found fs is inconsistent, the i-node of ino={} and name={:?} \
under parent directory of ino={} and name={:?} to replace should be in cache",
new_ino, new_name, new_parent, new_parent_node.get_name(),
);
}
if no_replace {
debug!(
"rename() found i-node of ino={} and name={:?} under new parent ino={} and name={:?}, \
but RENAME_NOREPLACE is specified",
new_ino, new_name, new_parent, new_parent_node.get_name(),
);
return util::build_error_result_from_errno(
Errno::EEXIST, // RENAME_NOREPLACE
format!(
"rename() found i-node of ino={} and name={:?} under new parent ino={} and name={:?}, \
but RENAME_NOREPLACE is specified",
new_ino, new_name, new_parent, new_parent_node.get_name(),
),
);
}
debug!(
"rename() found the new parent directory of ino={} and name={:?} already has a child with name={:?}",
new_parent, new_parent_node.get_name(), new_name,
);
Some(new_ino)
} else {
None
};
Ok((old_parent_fd, old_entry_ino, new_parent_fd, new_entry_ino))
}
/// Rename in cache helper
async fn rename_in_cache_helper(
&self,
old_parent: INum,
old_name: &str,
new_parent: INum,
new_name: &str,
) -> Option<DirEntry> {
let entry_to_move = {
// TODO: support thread-safe
let mut cache = self.cache.write().await;
let old_parent_node = cache.get_mut(&old_parent).unwrap_or_else(|| {
panic!(
"impossible case when rename, the from parent i-node of ino={} should be in cache",
old_parent,
)
});
match old_parent_node.remove_entry_for_rename(old_name).await {
None => panic!(
"impossible case when rename, the from entry of name={:?} \
should be under from directory ino={} and name={:?}",
old_name,
old_parent,
old_parent_node.get_name(),
),
Some(old_entry) => {
DirEntry::new(old_entry.ino(), new_name.to_owned(), old_entry.entry_type())
}
}
};
node::rename_fullpath_recursive(entry_to_move.ino(), new_parent, &self.cache).await;
{
// TODO: support thread-safe
let mut cache = self.cache.write().await;
let new_parent_node = cache.get_mut(&new_parent).unwrap_or_else(|| {
panic!(
"impossible case when rename, the to parent i-node of ino={} should be in cache",
new_parent
)
});
new_parent_node.insert_entry_for_rename(entry_to_move).await
}
}
} | |
base.py | """
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (techfest_management/config/settings/base.py - 3 = techfest_management/)
APPS_DIR = ROOT_DIR.path('techfest_management')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
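# e.g. the .env file might contain lines such as: DJANGO_DEBUG=True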
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'Asia/Kolkata'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True |
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///techfest_management'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
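# wrap each HTTP request in a single database transaction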
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
]
LOCAL_APPS = [
'techfest_management.users.apps.UsersAppConfig',
'management',
'events',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'techfest_management.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = 'DENY'
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Dhaval Savalia""", '[email protected]'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# Celery
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['techfest_management.taskapp.celery.CeleryAppConfig']
if USE_TZ:
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-timezone
CELERY_TIMEZONE = TIME_ZONE
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_url
CELERY_BROKER_URL = env('CELERY_BROKER_URL')
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_backend
CELERY_RESULT_BACKEND = CELERY_BROKER_URL
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-accept_content
CELERY_ACCEPT_CONTENT = ['json']
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-task_serializer
CELERY_TASK_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-result_serializer
CELERY_RESULT_SERIALIZER = 'json'
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERYD_TASK_TIME_LIMIT = 5 * 60
# http://docs.celeryproject.org/en/latest/userguide/configuration.html#task-soft-time-limit
# TODO: set to whatever value is adequate in your circumstances
CELERYD_TASK_SOFT_TIME_LIMIT = 60
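# Illustrative sketch only (not part of this project): a task defined against the
# limits above would look like this.
# from celery import shared_task
#
# @shared_task(soft_time_limit=CELERYD_TASK_SOFT_TIME_LIMIT)
# def ping():
#     return 'pong'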
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'techfest_management.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'techfest_management.users.adapters.SocialAccountAdapter'
# django-compressor
# ------------------------------------------------------------------------------
# https://django-compressor.readthedocs.io/en/latest/quickstart/#installation
INSTALLED_APPS += ['compressor']
STATICFILES_FINDERS += ['compressor.finders.CompressorFinder']
# Your stuff...
# ------------------------------------------------------------------------------ | |
export-query.ts | import { Component, OnInit, OnDestroy, Inject, ChangeDetectorRef} from '@angular/core';
import { HttpResponse } from '@angular/common/http';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { Subscription } from 'rxjs';
import { ValidationService } from "@sinequa/core/validation";
import { NotificationsService } from "@sinequa/core/notification";
import { Utils } from "@sinequa/core/base";
import { ModalRef, ModalButton, ModalResult, MODAL_MODEL } from "@sinequa/core/modal";
import { ExportSourceType, ExportOutputFormat, CCWebService, CCApp} from "@sinequa/core/web-services";
import {SavedQueriesService, ExportQueryModel} from "../../saved-queries.service";
import {SelectionService} from "@sinequa/components/selection";
import { AppService } from '@sinequa/core/app-utils';
/**
* Component representing the Export dialog where user can customize the query export action.
*
*/
@Component({
selector: 'sq-export-query',
templateUrl: './export-query.html',
styleUrls: ["./export-query.scss"]
})
export class BsExportQuery implements OnInit, OnDestroy {
public readonly supportedFormats: ExportOutputFormat[] = [
ExportOutputFormat.Csv,
ExportOutputFormat.Xlsx,
ExportOutputFormat.Json
];
public readonly outputFormats: typeof ExportOutputFormat = ExportOutputFormat;
public readonly sourceTypes: typeof ExportSourceType = ExportSourceType;
public form: FormGroup;
public savedQueries: string[];
public buttons: ModalButton[];
public isDownloading: boolean;
public exportableColumns: string[];
private formChanges: Subscription;
constructor(
@Inject(MODAL_MODEL) public model: ExportQueryModel,
private formBuilder: FormBuilder,
private appService: AppService,
private selectionService: SelectionService,
private savedQueriesService: SavedQueriesService,
private validationService: ValidationService,
private notificationsService: NotificationsService,
private changeDetectorRef: ChangeDetectorRef,
public modalRef: ModalRef) { }
ngOnInit(): void {
this.savedQueries = [];
for (const query of this.savedQueriesService.savedqueries) {
this.savedQueries.push(query.name);
}
this.exportableColumns = [];
if (this.appService.app) {
const queryExportConfig = this.getDefaultQueryExportConfig(this.appService.app);
const columns = (queryExportConfig.columns && queryExportConfig.columns['column$']) || [];
for (const column of columns) {
this.exportableColumns.push(column.title);
}
}
this.form = this.formBuilder.group({
'format': [this.supportedFormats[0]],
'exportedColumns': [this.model.exportedColumns],
'export': [this.model.export, Validators.required],
'maxCount': [this.model.maxCount, Validators.compose([
this.validationService.integerValidator(),
this.validationService.minValidator(1)])],
});
this.isDownloading = false;
this.buttons = [
new ModalButton({
text: "msg#exportQuery.btnDownload",
result: ModalResult.Custom,
anchor: true,
primary: true,
action: (_button) => {
const observable = this.savedQueriesService.download(this.model);
if (observable) {
Utils.subscribe(observable,
(response: HttpResponse<Blob>) => {
console.log('exportQuery.download done.');
this.notificationsService.info('msg#exportQuery.successNotification');
this.modalRef.close(ModalResult.OK);
return response;
},
(error) => {
console.log('exportQuery.download failure - error: ', error);
this.modalRef.close(error);
});
this.isDownloading = true;
this.changeDetectorRef.markForCheck();
}
},
}),
new ModalButton({
result: ModalResult.Cancel,
})
];
const onFormChanged = () => {
const newFormat = this.form.value['format'];
const newMaxCount = this.form.value['maxCount'];
const newExportedColumns = this.form.value['exportedColumns'];
if (this.model.format !== newFormat) {
this.model.format = newFormat;
}
if (this.model.maxCount !== newMaxCount) {
this.model.maxCount = newMaxCount;
}
this.model.exportedColumns = newExportedColumns;
};
this.formChanges = Utils.subscribe(this.form.valueChanges, onFormChanged);
}
ngOnDestroy(): void {
if (this.formChanges) {
this.formChanges.unsubscribe();
}
}
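    /**
     * Retrieves the app's default query export web service configuration.
     * If app.queryExport lists several services (comma-separated), only the first one is used.
     */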
private getDefaultQueryExportConfig(app: CCApp): CCQueryExport {
let queryExport = app.queryExport;
if (queryExport.indexOf(',') !== -1) {
queryExport = queryExport.substring(0, queryExport.indexOf(','));
}
return <CCQueryExport>Utils.getField(app.webServices, queryExport);
}
/**
* Check if the client has selected some records.
*
* @returns true if the client has selected some records.
*/
public hasSelectedRecords(): boolean {
return this.selectionService.haveSelectedRecords;
}
/**
     * Checks whether the user-chosen export source matches the given one.
     * <p>
     * Used to control the radio button state.
     *
     * @param type The source to check.
     * @returns true if the user-chosen export source matches the given one.
*/
public sourceChosen(type: ExportSourceType): boolean {
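        // model.export is treated as a bit field here: the check passes whenever the
        // given source flag is set, not only when the values are strictly equal.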
return (this.model.export & type) !== 0;
}
/**
* Callback called when user chooses a new export source.
*
* @param event The related UI event.
* @param type The new chosen source.
*/
public sourceChanged(event: UIEvent, type: ExportSourceType): void {
const input = <HTMLInputElement>event.target;
if (input.checked) {
this.model.export = type;
}
}
/**
     * Checks whether the dialog allows the user to choose an export source.
     * It returns false when the input model's export type is already a saved query.
     *
     * @returns true if the dialog allows the user to choose an export source.
*/
public showSourceChooser(): boolean {
return !this.sourceChosen(ExportSourceType.SavedQuery);
}
public close(): void {
this.modalRef.close(ModalResult.Cancel);
}
}
interface CCQueryExportColumnDef {
title: string;
pattern: string;
selectionQuery?: string;
}
interface CCQueryExport extends CCWebService {
webServiceType: "queryexport";
columns?: CCQueryExportColumnDef[];
linksFilterDuplicateUrls?: boolean;
linksGlobalRelevance?: string;
linksMaxCount?: number;
linksSortByOrder?: boolean;
maxCount?: number;
separator?: string;
}
settings.py | # -*- coding: utf-8 -*-
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import os.path
from pathlib import Path
from snowplow_tracker import Subject, Tracker, AsyncEmitter
from . import authentication, permissions
try:
    from . import database
except ImportError:
    import database
try:
    from . import haystack
except ImportError:
    import haystack
def parse_bool(val):
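    # truthy unless the value is None/empty, "0", or any casing of "false"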
return val and val != "0" and str(val).lower() != "false"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The SECRET_KEY is provided via an environment variable in OpenShift
SECRET_KEY = os.getenv(
"DJANGO_SECRET_KEY",
# safe value used for development when DJANGO_SECRET_KEY might not be set
"75f46345-af2d-497d-a3ec-b6f05e5266f4",
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = parse_bool(os.getenv("DEBUG", "True"))
DJANGO_DEBUG = parse_bool(os.getenv("DJANGO_DEBUG", "False"))
DEMO_SITE = parse_bool(os.getenv("DEMO_SITE", "False"))
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
# Add your apps here to enable them
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"haystack",
"rest_framework",
"drf_generators",
"drf_yasg",
"django_filters",
"vcr_server",
"api.v2",
"api.v3",
"api.v4",
"corsheaders",
"rest_hooks", # only required when using webhook subscriptions
"subscriptions", # only required when using webhook subscriptions
"agent_webhooks",
"django_nose",
]
TEST_RUNNER = "django_nose.NoseTestSuiteRunner"
HAYSTACK_CONNECTIONS = {"default": haystack.config()}
if parse_bool(os.getenv("ENABLE_REALTIME_INDEXING")):
print("Enabling realtime indexing ...")
HAYSTACK_SIGNAL_PROCESSOR = "api.v2.signals.RelatedRealtimeSignalProcessor"
else:
print("Realtime indexing has been disabled ...")
HAYSTACK_DOCUMENT_FIELD = "document"
HAYSTACK_MAX_RESULTS = 200
API_VERSION_ROUTING_MIDDLEWARE = os.getenv(
"API_VERSION_ROUTING_MIDDLEWARE",
"vcr_server.middleware.routing.HTTPHeaderRoutingMiddleware"
)
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
API_VERSION_ROUTING_MIDDLEWARE,
]
ROOT_URLCONF = "vcr_server.urls"
CORS_URLS_REGEX = r"^/api/.*$"
CORS_ORIGIN_ALLOW_ALL = True
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {"default": database.config()}
OPTIMIZE_TABLE_ROW_COUNTS = parse_bool(os.getenv("OPTIMIZE_TABLE_ROW_COUNTS", "True"))
CONN_MAX_AGE = CREDS_BATCH_SIZE = int(os.getenv("CONN_MAX_AGE", "0"))
if CONN_MAX_AGE < 0:
CONN_MAX_AGE = None
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
AUTH_USER_MODEL = "api_v2.User"
REST_FRAMEWORK = {
# "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning",
"DEFAULT_PAGINATION_CLASS": "vcr_server.pagination.EnhancedPageNumberPagination",
"PAGE_SIZE": 100,
"MAX_PAGE_SIZE": 200,
"DEFAULT_AUTHENTICATION_CLASSES": authentication.defaults(),
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
],
# Used for drf-yasg to split api specs into multiple versions
"DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.NamespaceVersioning",
}
SWAGGER_SETTINGS = {
"SECURITY_DEFINITIONS": {"basic": {"type": "basic"}},
"USE_SESSION_AUTH": True,
"DEFAULT_PAGINATOR_INSPECTORS": [
'vcr_server.inspector.PageNumberPaginatorInspectorClass',
],
}
CRED_TYPE_SYNONYMS = {
"registration": "registration.registries.ca",
"relationship": "relationship.registries.ca",
"business_number": "relationship.registries.ca",
}
# Set up core Snowplow environment for api tracking
SP_APP_ID = os.getenv("SP_TRACKING_APP_ID", "orgbook_api_local_dev")
SP_EMITTER = AsyncEmitter(
os.getenv("SP_TRACKING_EMITTER", "spm.apps.gov.bc.ca"),
protocol=os.getenv("SP_TRACKING_EMITTER_PROTOCOL", "https")
)
SP_TRACKER = Tracker(SP_EMITTER, encode_base64=False, app_id=SP_APP_ID)
LOGIN_URL = "rest_framework:login"
LOGOUT_URL = "rest_framework:logout"
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = "/api/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Set up support for proxy headers (provide correct URL in swagger UI)
USE_X_FORWARDED_HOST = True
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s"
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"console_handler": {
"class": "logging.StreamHandler",
"level": str(os.getenv("DJANGO_LOG_LEVEL", "WARN")).upper(),
"formatter": "verbose",
}
},
"loggers": {
"api": {
"handlers": ["console_handler"],
"level": str(os.getenv("DJANGO_LOG_LEVEL", "WARN")).upper(),
"propagate": False
},
"django": {
"handlers": ["console_handler"],
"level": str(os.getenv("DJANGO_LOG_LEVEL", "WARN")).upper(),
"propagate": False,
},
"django.request": {
"handlers": ["console_handler"],
"level": str(os.getenv("DJANGO_LOG_LEVEL", "WARN")).upper(),
"propagate": False,
},
# "django.db.backends": {"level": "DEBUG", "handlers": ["console_handler"]},
},
"root": {
"handlers": ["console_handler"],
"level": str(os.getenv("DJANGO_LOG_LEVEL", "WARN")).upper(),
"propagate": False,
}
}
if os.getenv("SQL_DEBUG"):
LOGGING["filters"]["require_debug_true"] = {
"()": "django.utils.log.RequireDebugTrue"
}
LOGGING["handlers"]["console"] = {
"level": "DEBUG",
"filters": ["require_debug_true"],
"class": "logging.StreamHandler",
}
LOGGING["loggers"]["django.db.backends"] = {
"level": "DEBUG",
"handlers": ["console"],
}
INDY_HOLDER_ID = "TheOrgBook_Holder"
APPLICATION_URL = os.getenv("APPLICATION_URL") or "http://localhost:8080"
API_METADATA = {
"title": "OrgBook BC API", | "organizations and businesses will also begin to issue digital records through "
"OrgBook BC. For example, permits and licenses issued by various government services.",
"terms": {"url": "https://www2.gov.bc.ca/gov/content/data/open-data"},
"contact": {"email": "[email protected]"},
"license": {
"name": "Open Government License - British Columbia",
"url": "https://www2.gov.bc.ca/gov/content/data/open-data/api-terms-of-use-for-ogl-information",
},
}
# Words 4 characters and over that shouldn't be considered significant when searching
SEARCH_SKIP_WORDS = [
"assoc",
"association",
"company",
"corp",
"corporation",
"enterprise",
"enterprises",
"entreprise",
"entreprises",
"incorporated",
"incorporée",
"incorporation",
"limited",
"limitée",
]
# Return partial matches
SEARCH_TERMS_EXCLUSIVE = False
#
# Read settings from a custom settings file
# based on the path provided as an input parameter
# The choice of the custom settings file is driven by the value of the THEME env
# variable (i.e. ongov)
#
custom_settings_file = Path(
BASE_DIR, "custom_settings_" + str(os.getenv("THEME")).lower() + ".py"
)
if custom_settings_file.exists():
with open(custom_settings_file) as source_file:
print("Loading custom settings file: {}".format(custom_settings_file.name))
exec(source_file.read())
################################################################################################
# The next section includes configurations specific to the webhook subscription functionality. #
# If you are not using the webhooks, you can comment out the following settings. #
################################################################################################
# django-rest-hooks settings
# authenticate REST hook services so only the subscriber can view/update their subscriptions
AUTHENTICATION_BACKENDS = ["subscriptions.icatrestauth.IcatAuthBackend"]
# wrapper function that delivers the web hook (passes the task to a rabbitmq worker)
HOOK_DELIVERER = "subscriptions.tasks.deliver_hook_wrapper"
# data model that triggers hooks to be sent - when a new record is added it triggers the hook services to run
HOOK_CUSTOM_MODEL = "subscriptions.models.CredentialHook"
# for hook events, checks the credential against the subscriptions to see which hooks to fire
HOOK_FINDER = "subscriptions.hook_utils.find_and_fire_hook"
# how to monitor for events - look for "created" events
HOOK_EVENTS = {
# 'any.event.name': 'App.Model.Action' (created/updated/deleted)
"hookable_cred.added": "subscriptions.HookableCredential.created+"
}
# celery settings
CELERY_BROKER_HEARTBEAT = 0 # see https://github.com/celery/celery/issues/4817
# task broker = rabbitmq
CELERY_BROKER_URL = "pyamqp://{}:{}@{}//".format(
os.environ.get("RABBITMQ_USER"),
os.environ.get("RABBITMQ_PASSWORD"),
os.environ.get("RABBITMQ_SVC_NAME", "rabbitmq"),
)
# database backend for retrieving task results
CELERY_TASK_BACKEND = "db+postgresql://{}:{}@{}/{}".format(
os.environ.get("DATABASE_USER"),
os.environ.get("DATABASE_PASSWORD"),
os.environ.get("DATABASE_SERVICE_NAME"),
os.environ.get("DATABASE_NAME"),
)
# custom hook settings
# max retries for http errors
HOOK_RETRY_THRESHOLD = os.environ.get("HOOK_RETRY_THRESHOLD", 3)
# number of seconds to wait between retries
HOOK_RETRY_DELAY = os.environ.get("HOOK_RETRY_DELAY", 5)
# max errors on a subscription before "expiring" the subscription
HOOK_MAX_SUBSCRIPTION_ERRORS = os.environ.get("HOOK_MAX_SUBSCRIPTION_ERRORS", 10)
###########################
# End of webhook settings #
###########################
# This string is used to alias the agent's self connection for verification
AGENT_SELF_CONNECTION_ALIAS = "credential-registry-self"
AGENT_ADMIN_URL = os.environ.get("AGENT_ADMIN_URL")
AGENT_ADMIN_API_KEY = os.environ.get("AGENT_ADMIN_API_KEY")
ADMIN_REQUEST_HEADERS = {"Content-Type": "application/json"}
if AGENT_ADMIN_API_KEY is not None and 0 < len(AGENT_ADMIN_API_KEY):
ADMIN_REQUEST_HEADERS["x-api-key"] = AGENT_ADMIN_API_KEY
# API routing middleware settings
HTTP_HEADER_ROUTING_MIDDLEWARE_URL_FILTER = "/api"
HTTP_AGENT_CALLBACK_MIDDLEWARE_URL_FILTER = "/agentcb"
HTTP_HEADER_ROUTING_MIDDLEWARE_ACCEPT_MAP = {
u"application/orgbook.bc.api+json": u"application/json"
}
HTTP_HEADER_ROUTING_MIDDLEWARE_VERSION_MAP = {
u"v2": u"v2",
u"v3": u"v3",
u"v4": u"v4",
u"alpha": u"v4",
u"latest": u"v3",
u"default": u"v2",
}
eventhubreceiver.js | // Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
'use strict';
var EventData = require('./eventdata');
var Promise = require('bluebird');
var EventEmitter = require('events').EventEmitter;
var util = require('util');
/**
* @class EventHubReceiver
* @classdesc Constructs an {@linkcode EventHubReceiver} object
*/
var EventHubReceiver = function (amqpClient, endpoint) {
this.amqpClient = amqpClient;
this.endpoint = endpoint;
};
util.inherits(EventHubReceiver, EventEmitter);
// On receiver event received
EventHubReceiver.EventReceived = 'eventReceived';
// On receive error
EventHubReceiver.Error = 'error';
/* Notes: StartReceive shall handle retries
* onError shall be emitted after the retries have been exhausted
* EventHubReceiver shall support redirect
*/
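// Minimal usage sketch (illustrative only; assumes an amqpClient and endpoint are
// already available — they are not created in this file):
//   var receiver = new EventHubReceiver(amqpClient, endpoint);
//   receiver.on(EventHubReceiver.EventReceived, function (eventData) { console.log(eventData); });
//   receiver.on(EventHubReceiver.Error, function (err) { console.error(err); });
//   receiver.StartReceive(Date.now());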
/**
* The [StartReceive]{@linkcode EventHubReceiver#StartReceive} method starts
* receiving events from the event hub for the specified partition.
* @param startTime The startTime to use as filter for the events being received.
*/
EventHubReceiver.prototype.StartReceive = function (startTime) {
if (startTime !== null) {
console.log('Listening on endpoint ' + this.endpoint + ' start time: ' + startTime);
}
var rxName = 'eventhubclient-rx';
var rxOptions = { attach: { target: { address: rxName } } };
var self = this;
return new Promise(function (resolve) {
self.amqpClient.createReceiver(self.endpoint, rxOptions).then(function (amqpReceiver) {
amqpReceiver.on('message', function (message) {
var eventData = new EventData(message.body, message.annotations.value);
self.emit(EventHubReceiver.EventReceived, eventData);
});
amqpReceiver.on('errorReceived', function (rx_err) {
self.emit(EventHubReceiver.Error, rx_err);
});
});
resolve();
});
};
/**
* The [StartReceiveFromOffset]{@linkcode EventHubReceiver#StartReceiveFromOffset} method starts
 * receiving events from the event hub, while filtering events starting at a certain offset.
* @param startOffset The start offset to use as filter for the events being received.
*/
EventHubReceiver.prototype.StartReceiveFromOffset = function (startOffset) {
if (startOffset !== null) {
console.log('Listening on endpoint ' + this.endpoint + ' start offset: ' + startOffset);
}
var self = this;
return new Promise(function (resolve) {
self.amqpClient.createReceiver(self.endpoint).then(function (amqpReceiver) {
amqpReceiver.on('message', function (message) {
var eventData = new EventData(message.body, message.annotations.value);
self.emit(EventHubReceiver.EventReceived, eventData);
});
amqpReceiver.on('errorReceived', function (rx_err) {
self.emit(EventHubReceiver.Error, rx_err);
});
});
resolve();
});
};
module.exports = EventHubReceiver;
|
application.go | // Copyright © 2019 The Vultr-cli Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"context"
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/vultr/vultr-cli/cmd/printer"
)
// Applications represents the application command
func Applications() *cobra.Command {
appsCmd := &cobra.Command{
Use: "apps",
Aliases: []string{"a"},
Short: "Display all available applications",
}
appsCmd.AddCommand(appsList)
appsList.Flags().StringP("cursor", "c", "", "(optional) Cursor for paging.")
appsList.Flags().IntP("per-page", "p", 100, "(optional) Number of items requested per page. Default is 100 and Max is 500.")
return appsCmd
}
var appsList = &cobra.Command{
Use: "list",
Short: "list applications",
Aliases: []string{"l"},
Run: func(cmd *cobra.Command, args []string) {
options := getPaging(cmd)
apps, meta, err := client.Application.List(context.Background(), options)
if err != nil {
fmt.Printf("error getting available applications : %v\n", err)
os.Exit(1)
}
printer.Application(apps, meta)
},
}
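// Usage sketch (an assumption, not part of this file): the returned command is
// typically attached to the CLI root, e.g. rootCmd.AddCommand(Applications()).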
get_namespaced_notebooks_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/middleware"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetNamespacedNotebooksParams creates a new GetNamespacedNotebooksParams object
// with the default values initialized.
func NewGetNamespacedNotebooksParams() GetNamespacedNotebooksParams {
	var (
		// initialize parameters with default values
		clusterNameDefault = string("")
		pageDefault        = string("1")
		sizeDefault        = string("10")
	)
	return GetNamespacedNotebooksParams{
		ClusterName: &clusterNameDefault,
		Page:        &pageDefault,
		Size:        &sizeDefault,
	}
}
// GetNamespacedNotebooksParams contains all the bound params for the get namespaced notebooks operation
// typically these are obtained from a http.Request
//
// swagger:parameters getNamespacedNotebooks
type GetNamespacedNotebooksParams struct {
// HTTP Request Object
HTTPRequest *http.Request `json:"-"`
/*
In: query
Default: ""
*/
ClusterName *string
/*
Required: true
In: path
*/
Namespace string
/*
In: query
Default: "1"
*/
Page *string
/*
In: query
Default: "10"
*/
Size *string
}
// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface
// for simple values it will use straight method calls.
//
// To ensure default values, the struct must have been initialized with NewGetNamespacedNotebooksParams() beforehand.
func (o *GetNamespacedNotebooksParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error {
var res []error
o.HTTPRequest = r
qs := runtime.Values(r.URL.Query())
qClusterName, qhkClusterName, _ := qs.GetOK("clusterName")
if err := o.bindClusterName(qClusterName, qhkClusterName, route.Formats); err != nil {
res = append(res, err)
}
rNamespace, rhkNamespace, _ := route.Params.GetOK("namespace")
if err := o.bindNamespace(rNamespace, rhkNamespace, route.Formats); err != nil {
res = append(res, err)
}
qPage, qhkPage, _ := qs.GetOK("page")
if err := o.bindPage(qPage, qhkPage, route.Formats); err != nil {
res = append(res, err)
}
qSize, qhkSize, _ := qs.GetOK("size")
if err := o.bindSize(qSize, qhkSize, route.Formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
// bindClusterName binds and validates parameter ClusterName from query.
func (o *GetNamespacedNotebooksParams) bindClusterName(rawData []string, hasKey bool, formats strfmt.Registry) error {
var raw string
if len(rawData) > 0 {
raw = rawData[len(rawData)-1]
}
// Required: false
// AllowEmptyValue: false
if raw == "" { // empty values pass all other validations
// Default values have been previously initialized by NewGetNamespacedNotebooksParams()
return nil
}
o.ClusterName = &raw
return nil
}
// bindNamespace binds and validates parameter Namespace from path.
func (o *GetNamespacedNotebooksParams) bindNamespace(rawData []string, hasKey bool, formats strfmt.Registry) error {
var raw string
if len(rawData) > 0 {
raw = rawData[len(rawData)-1]
}
// Required: true
// Parameter is provided by construction from the route
o.Namespace = raw
return nil
}
// bindPage binds and validates parameter Page from query.
func (o *GetNamespacedNotebooksParams) bindPage(rawData []string, hasKey bool, formats strfmt.Registry) error {
var raw string
if len(rawData) > 0 {
raw = rawData[len(rawData)-1]
}
// Required: false
// AllowEmptyValue: false
if raw == "" { // empty values pass all other validations
// Default values have been previously initialized by NewGetNamespacedNotebooksParams()
return nil
}
o.Page = &raw
return nil
}
// bindSize binds and validates parameter Size from query.
func (o *GetNamespacedNotebooksParams) bindSize(rawData []string, hasKey bool, formats strfmt.Registry) error {
var raw string
if len(rawData) > 0 {
raw = rawData[len(rawData)-1]
}
// Required: false
// AllowEmptyValue: false
if raw == "" { // empty values pass all other validations
// Default values have been previously initialized by NewGetNamespacedNotebooksParams()
return nil
}
o.Size = &raw
return nil
}
| {
var (
// initialize parameters with default values
clusterNameDefault = string("")
pageDefault = string("1")
sizeDefault = string("10")
)
return GetNamespacedNotebooksParams{
ClusterName: &clusterNameDefault,
Page: &pageDefault,
Size: &sizeDefault,
}
} |
__init__.py | # -*- coding: utf-8 -*-

from dndgui.gui import MainForm
|
embed_and_filter.py | import numpy as np
import torch
from transformers import BertTokenizer, BertModel
from torch.utils.data import DataLoader
import util
from util import MaskableList
from collections import defaultdict, Counter
from sentence_transformers import SentenceTransformer
import spacy
import time
import itertools
from itertools import islice
import os
import argparse
from sklearn.preprocessing import normalize
from sqlitedict import SqliteDict
import ast
import pickle as pkl
import sqlite3
nlp = spacy.load("en_core_web_lg", disable=["ner"]) ## you only need the parser and tagger
## device = torch.device("cuda" if torch.cuda.is_available() else "cpu") ##.to(device)
## NOTE: once debugging is ironed-out remove all print statements, csv file, and time study files, for AWS
'''usage: (if you use embed_and_filter_job_launcher.py)
python3 -u code/embed_and_filter_job_launcher.py \
you must change the command line arguments inside the embed_and_filter_job_launcher.py file
'''
'''usage: (if you use embed_and_filter.sh)
python3 -u code/embed_and_filter.py \
-job_id $i \
-outDir 'betatest/out/' \
-dataDir 'betatest/data/' \
-NUM_JOBS 2 \
-NUM_GPUS 2 \
-PROC_PER_GPU 1 \
-gpu_ids 0 1 \
-batch_size 175 \
-clip_len 225 \
-job_slices "job_slices.pkl" \
-query_sentences 'betatest/data/query_sentences.txt' \
-sentences_dict 'sentences.db' \
-trace_dict 'trace.db' \
-spacy_toks_dict 'spacy_toks.db' \
-spacy_pos_dict 'spacy_pos.db' \
-spacy_deps_dict 'spacy_deps.db' \
--BERT \
--MEAN \
> 'betatest/out/embed_and_filter_job'$i'.stdout' 2>&1
alternative: | tee betatest/out/stdout_job_array.txt) 3>&1 1>&2 2>&3 | tee betatest/out/stderr_job_array.txt
'''
'''global argparser'''
total_nword_embeddings, nskipped, time_elapsed_embedding, time_elapsed_filtering = 0, 0, 0, 0
bert_tokenizer, bert_model = None, None
parser = argparse.ArgumentParser(description='Processing list of files...')
parser.add_argument('-outDir', required=True, help='Directory where all outfiles will be written to. Example: out/')
parser.add_argument('-dataDir', required=True, help='Directory where all data files are located. Example: data/')
parser.add_argument('-job_id', required=True, help='job_id responsible for x-partition of the amalgams.')
# parser.add_argument('-NUM_JOBS', type=int, required=True, help='example: 5 (should match npartitions==NUM_GPUS)')
parser.add_argument('-batch_size', type=int, required=True, help='example: 400 (400 sentences in each batch)')
parser.add_argument('-clip_len', type=int, required=True, help='number of sentences to batch')
#parser.add_argument('-NUM_GPUS', type=int, required=True, help='number of GPUs')
#parser.add_argument('-PROC_PER_GPU', type=int, required=True, help='number of processes per GPU')
parser.add_argument('-gpu_id', type=int, required=True, help='id of the GPU to run this job on, e.g. 3')
parser.add_argument('-job_slices', type=str, required=True, help="the job slices file output from create_amalgams.py. Example: 'job_slices.pkl'")
parser.add_argument('-query_sentences', type=str, required=True, help="query sentences filename. Example: 'query_sentences.txt'")
parser.add_argument('-sentences_dict', required=True, help="sqlite db filename. Example: 'sentences_dict.db'")
parser.add_argument('-trace_dict', required=True, help="sqlite db filename. Example: 'trace_dict.db'")
parser.add_argument('-spacy_toks_dict', required=True, help="sqlite db filename. Example: 'spacy_toks_dict.db'")
parser.add_argument('-spacy_pos_dict', required=True, help="sqlite db filename. Example: 'spacy_pos_dict.db'")
parser.add_argument('-spacy_deps_dict', required=True, help="sqlite db filename. Example: 'spacy_deps_dict.db'")
parser.add_argument('--BERT', action='store_false', dest='SBERT_flag', required=False, help='Enable BERT as the model')
parser.add_argument('--MEAN', action='store_false', dest='HEAD_flag', required=False, help='Calculates embeddings using the mean of the subword units')
parser.add_argument('--SBERT', action='store_true', dest='SBERT_flag', required=False, help='Enable SBERT as the model')
parser.add_argument('--HEAD', action='store_true', dest='HEAD_flag', required=False, help='Calculates embedding using only the headword embedding of the subword unit')
args = parser.parse_args()
'''global variables'''
## load job partition file
job_slices = util.pickle_load(args.outDir+args.job_slices)
print('\nlen(job_slices): {}'.format(len(job_slices)))
#################################################
########## Embed and Filter ############
#################################################
def embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks, spacy_pos, spacy_deps): ## , bert_tokenizer, bert_model, SBERT_flag, HEAD_flag
    ''' Takes in a batch of sentences and generates BERT embeddings for them.
    Args:
        round_id: index of the current batch (used for logging).
        sentence_batch: raw sentence strings for this batch.
        trace_batch: (filename, sent_id) provenance tuples aligned with the sentences.
        spacy_toks, spacy_pos, spacy_deps: per-sentence spaCy tokens, POS tags and dependency labels.
    Returns:
        (cur_words, cur_embeds): filtered content words (with trace info) and their embeddings.
    Note:
        remove bert_tokenizer, bert_model, SBERT_flag, HEAD_flag from method signature when not running multiprocessing
        make sure SBERT_flag and HEAD_flag are added back in
    '''
global time_elapsed_embedding, time_elapsed_filtering
global bert_tokenizer, bert_model, args
start_embed_time = time.time()
cur_words, cur_embeds = [], []
content_tags = ['ADJ', 'ADV', 'NOUN', 'VERB']
aux_tags = ['aux', 'auxpass', 'poss', 'possessive', 'cop', 'punct']
## tensor board, web ui (pytorch)
## perform lowercasing of all the sentences for embeddings
sent_iter=iter(sentence_batch)
lowercased_sentence_batch = [sent.lower() for sent in sent_iter]
    if args.SBERT_flag:
        # SBERT encodes whole sentences, so this path yields one embedding per
        # sentence in the batch rather than per-word embeddings.
        return bert_model.encode(lowercased_sentence_batch)
else:
##pytorch logging library
# try:
## batched encoding is a dict with keys = dict_keys(['input_ids', 'token_type_ids', 'attention_mask'])
NER_encoded_batch = [bert_tokenizer.batch_encode_plus(tok) for tok in spacy_toks] ## bert_NER_toks
# encoded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch) ## regular bert_toks
## We want BERT to process our examples all at once (as one lowercased_sentence_batch).
## For that reason, we need to pad all lists to the same size, so we can represent the input as one 2-d array.
padded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch, pad_to_max_length=True)
## Grab indices and attn masks from the padded lowercased_sentence_batch.
## We need to tell BERT to ignore (mask) the padding we've added when it's processed as input.
padded_input_ids, attention_masks = np.array(padded_batch['input_ids']), np.array(padded_batch['attention_mask'])
NER_iter = iter(NER_encoded_batch)
bert_NER_toks = [[bert_tokenizer.convert_ids_to_tokens(NER_unit)[1:-1] for NER_unit in cur_dict['input_ids']] for cur_dict in NER_iter]
padded_tinput_ids = torch.tensor(padded_input_ids).cuda() ##batched padded_input_ids converted to torch tensors
attention_masks = torch.tensor(attention_masks).cuda() ##batched attention_masks converted to torch tensors
# print('padded_tinput_ids.size()[1] ', padded_tinput_ids.size())
if padded_tinput_ids.size()[1] > args.clip_len:
print('\n\nclipping sentences round {} '.format(round_id))
# print('\nclipped sentences: ', sentence_batch)
# print('\nbert_NER_toks: ', bert_NER_toks)
# print(' after change round {} - type(padded_tinput_ids) and size: {} {} '.format(i, type(padded_tinput_ids), padded_tinput_ids.size()))
# bert_NER_toks = [NER_unit[:args.clip_len] for NER_unit in bert_NER_toks]
print('before padded_tinput_ids.size: ', padded_tinput_ids.size())
padded_batch = bert_tokenizer.batch_encode_plus(lowercased_sentence_batch, max_length=args.clip_len, pad_to_max_length=True)
padded_input_ids, attention_masks = np.array(padded_batch['input_ids']), np.array(padded_batch['attention_mask'])
print('padded_input_ids.dtype, attention_masks.dtype: ', padded_input_ids.dtype, attention_masks.dtype)
padded_tinput_ids = torch.tensor(padded_input_ids).cuda() ##batched padded_input_ids converted to torch tensors
attention_masks = torch.tensor(attention_masks).cuda() ##batched attention_masks converted to torch tensors
print('after padded_tinput_ids.size: ', padded_tinput_ids.size())
print('---end clipped sentences---')
print('\n\n')
# print('after having been clipped - padded_tinput_ids.size: ', padded_tinput_ids.size())
try:
with torch.no_grad():
embeds = bert_model(padded_tinput_ids, attention_mask=attention_masks)
except RuntimeError:
print('\n\nLine 143 CUDA out of memory. ')
print('padded_tinput_ids.size: ', padded_tinput_ids.size())
return -1
## Saves relevant word embeddings from the padding (removing [CLS] and [SEP] tokens)
## for each sentence, where the last token resides
mask_iter = iter(np.array(attention_masks.cpu()))
relevant_ids = np.array([[i,len(arr)-1-list(arr[::-1]).index(1)] for i, arr in enumerate(mask_iter)])
## changes [SEP] tokens attention to 0
attention_masks[relevant_ids[:,0], relevant_ids[:,1]]=0 ## temp[:,0] return 0th col for all rows, temp[:,1]] return 1st col for all rows. Change corresponding [row, col] in arrays to 0
## changes [CLS] tokens attention to 0
attention_masks[:,0]=0
## attention masks to be applied to relevant embeddings within each torch tensor
mask_iter, embeds_iter = iter(attention_masks), iter(embeds[0])
relevant_embeds = [MaskableList(sublist)[submask] for sublist, submask in zip(embeds_iter, mask_iter)]
## reflects the bert_NER full-token words (not bert's subword units)
pos_iter, dep_iter = iter(spacy_pos), iter(spacy_deps)
relevant_annotations_mask = [(np.in1d(cur_pos,content_tags)) & (~np.in1d(cur_dep,aux_tags)) for cur_pos, cur_dep in zip(pos_iter,dep_iter)]
embed_time = time.time() - start_embed_time
time_elapsed_embedding += embed_time
start_filter_time = time.time()
if args.HEAD_flag:
## use only embedding of the full-token word for each subword unit
for i in range(len(bert_NER_toks)):
end_index,j,k=0,0,0
while(j<len(relevant_embeds[i])):
end_index=end_index+len(bert_NER_toks[i][k])
if relevant_annotations_mask[i][k]:
cur_words.append((k,spacy_toks[i][k],(trace_batch[i][0], int(trace_batch[i][1]))))
## stack, mean, and numpy 'em
temp = torch.mean(torch.stack(relevant_embeds[i][j:j+1]),0).cpu().numpy()
cur_embeds.append(temp)
j,k=end_index,k+1
else:
# use mean of subwords units to calculate embeddings
try:
for i in range(len(bert_NER_toks)):
end_index,j,k=0,0,0
while(j<len(relevant_embeds[i])):
end_index=end_index+len(bert_NER_toks[i][k])
# if (round_id > 799 and round_id < 803) or (round_id > 984 and round_id < 988):
# print('i {}, k {}, len(bert_NER_toks[i]) {}, bert_NER_toks[i][k] {}'.format(i, k, len(bert_NER_toks[i]), bert_NER_toks[i][k]))
# print('bert_NER_toks[i]: ', bert_NER_toks[i])
if relevant_annotations_mask[i][k]:
cur_words.append((k,spacy_toks[i][k],(trace_batch[i][0], int(trace_batch[i][1]))))
## stack, mean, and numpy 'em
temp = torch.mean(torch.stack(relevant_embeds[i][j:end_index]),0).cpu().numpy() ##is this end_index or end_index+1
cur_embeds.append(temp)
j,k=end_index,k+1
except IndexError as e:
print('\n\n---IndexError: list index out of range!---')
print(e)
print('round_id: ', round_id)
print('i, k:', i, k)
print('len(sentence_batch), len(trace_batch[0]): ', len(sentence_batch), len(trace_batch[0]))
print('len(bert_NER_toks)', len(bert_NER_toks))
print('len(bert_NER_toks[i]): ', len(bert_NER_toks[i]))
# print('\nbert_NER_toks[i]: ', bert_NER_toks[i])
# print('\nbert_NER_toks', bert_NER_toks)
print('--end current error--\n\n')
filter_time = (time.time() - start_filter_time)
time_elapsed_filtering += filter_time
# print('round %d Time elapsed filtering content words:\t%s' % (round_id, time.strftime("%H:%M:%S", time.gmtime(filter_time))))
# except AttributeError:
# print('\n\n---AttributeError----NoneType object has no attribute batch_encode_plus!')
# print('spacy_toks: ')
# print(spacy_toks)
# print('trace_batch: ')
# print(trace_batch)
# print('sentence_batch: ')
# print(sentence_batch)
# print('print(list(sentence_batch)):')
# print(list(sentence_batch))
# print('---end of line---\n\n')
if round_id % 100 == 0:
        print('finished batch {}. len(words): {} len(embeds): {}'.format(round_id, len(cur_words), len(cur_embeds)))
    return cur_words, cur_embeds
def embed_all_batches(batched_sentences, batched_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps):
    '''Iterates through the batched data and sends each batch to embed_sentences().
    Args:
        batched_sentences, batched_trace_info, batched_spacy_*: aligned DataLoader batches.
    Returns:
        (words, word_embeds): accumulated content words and embeddings across all batches.
    '''
global args, total_nword_embeddings
words, word_embeds = [], []
batch_iter, trace_iter, spacy_toks_iter, spacy_pos_iter, spacy_deps_iter = iter(batched_sentences), iter(batched_trace_info), iter(batched_spacy_toks), iter(batched_spacy_pos), iter(batched_spacy_deps)
for round_id, (sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) in enumerate(zip(batch_iter, trace_iter, spacy_toks_iter, spacy_pos_iter, spacy_deps_iter)):
if round_id % 100 == 0:
print('\nprocessing embedding {}... percentage processed {}'.format(round_id, (round_id/len(batched_sentences))*100))
cur_words, cur_embeds = embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) ## each batch is of size batch_size (see global var)
words.extend(cur_words)
word_embeds.extend(cur_embeds)
total_nword_embeddings += len(cur_embeds)
return words, word_embeds
def handle_batches(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps, words_dict, word_embeds_fname):
global args, job_slices, time_elapsed_embedding, time_elapsed_filtering
embed_time, filtering_time = 0, 0
batch_size, outDir = args.batch_size, args.outDir
print('size of batch: ', batch_size)
## Reads in gigaword file
# sentences, trace = read_file(gigaword_fname)
print('len(sentences), len(trace), len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps): ', len(cur_sentences), len(cur_trace), len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps))
## use pytorch library DataLoader to batch sentences and nlp annotations
batched_sentences = DataLoader(cur_sentences, batch_size=batch_size)
batched_trace_info = DataLoader(cur_trace, batch_size=batch_size, collate_fn=custom_collate)
batched_spacy_toks = DataLoader(cur_spacy_toks, batch_size=batch_size, collate_fn=custom_collate)
batched_spacy_pos = DataLoader(cur_spacy_pos, batch_size=batch_size, collate_fn=custom_collate)
batched_spacy_deps = DataLoader(cur_spacy_deps, batch_size=batch_size, collate_fn=custom_collate)
print('DataLoader (batch_size %d): %d %d %d %d %d' %(batch_size, len(batched_sentences), len(batched_trace_info), len(batched_spacy_toks), len(batched_spacy_pos), len(batched_spacy_deps)))
## Embeds sentences from all batches
words, word_embeds = embed_all_batches(batched_sentences, batched_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps)
print('these lengths should match: len(words): {}, len(word_embeds): {}, total_nword_embeds_check: {} '.format(len(words), len(word_embeds), total_nword_embeddings))
word_dict_start = time.time()
words_iter = iter(words)
idx_iter = range(len(words))
words_dict.update([(idx,word) for idx,word in zip(idx_iter,words_iter)])
words_dict.commit()
words_dict.close()
word_dict_time = time.time() - word_dict_start
## memmap word_embeds
memmap_start = time.time()
fp = np.memmap(word_embeds_fname, dtype='float32', mode='w+', shape=(len(word_embeds),768))
fp[:] = word_embeds[:]
del fp
memmap_time = time.time() - memmap_start
words_dict_fname = str(words_dict)[str(words_dict).index("(")+1:str(words_dict).index(")")]
## write shapes of each word_embedding job to a file to create word index later
with open(args.outDir+'shapes.txt','a') as fout:
fout.write(word_embeds_fname+' '+str(len(word_embeds))+'\n')
fout.write(words_dict_fname+' '+str(len(words))+'\n')
fout.close()
# print stats for sanity check
print('\n---stats---:')
print('total time embeddings docs: %s' % (time.strftime("%H:%M:%S", time.gmtime(time_elapsed_embedding))))
print('total time filtering content words: %s'% (time.strftime("%H:%M:%S", time.gmtime(time_elapsed_filtering))))
print('total time creating word_sqlite_dict: %s'% (time.strftime("%H:%M:%S", time.gmtime(word_dict_time))))
print('total elapsed copying word_embeds to memmap: %s'% (time.strftime("%H:%M:%S", time.gmtime(memmap_time))))
def create_query_matrix():
print('creating query matrix...')
global args
## xq files (query data)
xq_fname = args.outDir+'xq.dat' ## mmep query word embeddings
# qsents_fname = args.outDir+'qsents.pkl' ## sentences_dict
# qwords_fname = args.outDir+'qwords.pkl' ## qwords_dict
qsentences_dict, qwords_dict = SqliteDict(args.outDir+'qsentences.db'), SqliteDict(args.outDir+'qwords.db')
batch_size = args.batch_size
print('batch_size for query_matrix: ', batch_size)
xq, q_words, q_sentences, q_trace = [], [], [], []
## use len(query sentences as the batch_size)
## read in query sentences
with open(args.query_sentences, 'r') as fin:
for sent_id, line in enumerate(fin.read().splitlines()):
q_sentences.append(line.strip())
q_trace.append((args.query_sentences, sent_id))
print('len(q_sentences) and len(q_trace): ', len(q_sentences), len(q_trace))
spacy_docs = list(nlp.pipe(q_sentences)) ##no nead to clip len for spacy toks for the query matrix
spacy_toks = [[tok.text for tok in doc] for doc in spacy_docs]
spacy_pos = [[tok.pos_ for tok in doc] for doc in spacy_docs]
spacy_deps = [[tok.dep_ for tok in doc] for doc in spacy_docs]
## use pytorch library DataLoader to batch sentences and helper func batchify to batch spacy annotations
batched_q_sentences = DataLoader(q_sentences, batch_size=batch_size)
batched_q_trace_info = DataLoader(q_trace, batch_size=batch_size, collate_fn=custom_collate)
batched_spacy_toks = DataLoader(spacy_toks, batch_size=batch_size, collate_fn=custom_collate)
batched_spacy_pos = DataLoader(spacy_pos, batch_size=batch_size, collate_fn=custom_collate)
batched_spacy_deps = DataLoader(spacy_deps, batch_size=batch_size, collate_fn=custom_collate)
print('DataLoader (batch_size %d): %d %d %d %d %d' %(batch_size, len(batched_q_sentences), len(batched_q_trace_info), len(batched_spacy_toks), len(batched_spacy_pos), len(batched_spacy_deps)))
for round_id, (sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) in enumerate(zip(batched_q_sentences, batched_q_trace_info, batched_spacy_toks, batched_spacy_pos, batched_spacy_deps)):
cur_words, cur_embeds = embed_sentences(round_id, sentence_batch, trace_batch, spacy_toks_batch, spacy_pos_batch, spacy_deps_batch) ## each batch is of size batch_size (see global var)
q_words.extend(cur_words)
xq.extend([normalize([embed])[0] for embed in cur_embeds])
print('xq.shape: ', len(xq), len(xq[0]))
qwords_dict_fname = str(qwords_dict)[str(qwords_dict).index("(")+1:str(qwords_dict).index(")")]
with open(args.outDir+'shapes.txt','a') as fout:
fout.write(xq_fname+' '+str(len(xq))+'\n')
fout.write(qwords_dict_fname+' '+str(len(q_words))+'\n')
fout.close()
## memmap qword_embeds
fp = np.memmap(xq_fname, dtype='float32', mode='w+', shape=(len(xq),768))
fp[:] = xq[:]
del fp
qsentences_dict.update([(idx,sent) for idx,sent in enumerate(q_sentences)])
qwords_dict.update([(idx,qword) for idx,qword in enumerate(q_words)])
qsentences_dict.commit()
qwords_dict.commit()
qsentences_dict.close()
qwords_dict.close()
print('finished processing query matrix...')
# return xq_fname, qsents_fname, qwords_fname
#################################################
########### HELPER FUNCTIONS #########
#################################################
def get_partition_slice(it, size, section):
'''I could change this to only return the start and end index of each subarray instead of all the indices for that partition
'''
it = iter(it)
return list(iter(lambda: tuple(islice(it, size)), ()))[section]
def get_slice(it, size):
'''I could change this to only return the start and end index of each subarray instead of all the indices for that partition
'''
it = iter(it)
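    # chunks the iterable into tuples of `size` (the last chunk may be shorter),
    # e.g. get_slice(range(10), 4) -> [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9)]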
return list(iter(lambda: tuple(islice(it, size)), ()))
def custom_collate(x):
return x
def batchify(sentences, batch_size):
batched_items, this_batch, = [], []
for cur_item in islice(sentences,None,None):
this_batch.append(cur_item)
if len(this_batch) == batch_size:
batched_items.append(this_batch)
this_batch = []
if len(this_batch) > 0:
batched_items.append(this_batch)
return batched_items
def fast_read_from_sqlite_dict(sqlite_dict, start_index, end_index):
sqlite_dict_db = sqlite3.connect(sqlite_dict)
sqlite_dict_db_cursor = sqlite_dict_db.cursor()
sqlite_dict_db_cursor.execute("SELECT value FROM unnamed WHERE CAST(key as INTEGER) >= ? AND CAST(key as INTEGER) <= ?;", (start_index, end_index))
return [pkl.loads(x) for x in itertools.chain.from_iterable(sqlite_dict_db_cursor.fetchall())]
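# e.g. fast_read_from_sqlite_dict(args.outDir + args.sentences_dict, 0, 9) returns the
# pickled values stored under keys 0..9 — note that the end index is inclusive.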
# import itertools
# trace_iter_1, trace_iter_2 = itertools.tee(trace_iter)
# cur_trace_data = [(value, key) for key, value in zip(trace_iter_1, fast_read_from_sqlite_dict(trace_data, trace_iter_2))]
## do sanity check in ipython on loading these dictionaries and reading in using fast read, find out how to do cur_trace
## be careful about the indexing, b/c it looks like whatever is indexed in fast read includes the end index, whereas in trace_iter = list(range(start, end)) end does not. So you might need to do +1 or -1
#################################################
########### Main #########
#################################################
def main(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps):
global args
print('did you make it here?')
## xb files
words_dict = SqliteDict(args.outDir+'words_job'+args.job_id+'.db')
word_embeds_fname = args.outDir+'word_embeds_job'+args.job_id+'.dat'
print('\nprocessing files for job {}...'.format(args.job_id))
start = time.time()
## Generates words and respective word_embeds for each partition of the sentence index
## and outputs them to outfolders to combine later for creating annoy index
print('handling batches for job %s...' % (args.job_id))
handle_batches(cur_sentences, cur_trace, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps, words_dict, word_embeds_fname)
handle_batches_time = time.time()-start
print('time handling batches: %s' % (time.strftime("%H:%M:%S", time.gmtime(handle_batches_time))))
print('finished job {}'.format(args.job_id))
if __name__ == '__main__':
main_begin = time.time()
print('---argparser---:')
for arg in vars(args):
print(arg, '\t', getattr(args, arg), '\t', type(arg))
# run processing on GPU <gpu_id>
cuda_idx = args.gpu_id
with torch.cuda.device(cuda_idx):
## initialize bert_tokenizer and bert_model as global variable for all jobs
if args.SBERT_flag:
print('loading SBERT')
## Loads SBERT
bert_tokenizer = None
bert_model = SentenceTransformer('bert-base-nli-mean-tokens') ## model = SentenceTransformer('bert-base-nli-stsb-mean-tokens')
bert_model = bert_model.cuda()
else:
print('loading regular BERT')
## Loads BERT-base uncased
## BERT-Base, Uncased: 12-layer, 768-hidden, 12-heads, 110M parameters
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True, output_attentions=True)
bert_model = bert_model.cuda()
# bert_model = apex.amp.initialize(bert_model, opt_level="O2").to(device)
if int(args.job_id) == 1:
print('\nOnly processing query matrix during job {}: '.format(args.job_id))
create_query_matrix()
# print("l\nen(sent_data): {}, len(trace_data): {}, len(spacy_toks): {} len(spacy_pos): {} len(spacy_deps): {}".format(len(sent_data), len(trace_data), len(spacy_toks), len(spacy_pos), len(spacy_deps)))
## get correct partition for this job
start_index, end_index = job_slices[int(args.job_id)-1]
print('\njob {} - start index: {} end index: {} len(cur_partition): {}'.format(args.job_id, start_index, end_index, end_index-start_index))
start = time.time()
cur_sent_data = fast_read_from_sqlite_dict(args.outDir+args.sentences_dict, start_index, end_index)
trace_iter = iter(list(range(start_index, end_index+1)))
cur_trace_data = [(value, key) for key, value in zip(trace_iter, fast_read_from_sqlite_dict(args.outDir+args.trace_dict, start_index, end_index))]
cur_spacy_toks = fast_read_from_sqlite_dict(args.outDir+args.spacy_toks_dict, start_index, end_index)
cur_spacy_pos = fast_read_from_sqlite_dict(args.outDir+args.spacy_pos_dict, start_index, end_index)
cur_spacy_deps = fast_read_from_sqlite_dict(args.outDir+args.spacy_deps_dict, start_index, end_index)
retrieve_time = time.time() - start
print('total elapsed time retrieving the current partition: %s'% (time.strftime("%H:%M:%S", time.gmtime(retrieve_time))))
print("\nlen(cur_sent_data): {}, len(cur_trace_data): {}".format(len(cur_sent_data), len(cur_trace_data)))
print("len(cur_spacy_toks): {} len(cur_spacy_pos): {} len(cur_spacy_deps): {}".format(len(cur_spacy_toks), len(cur_spacy_pos), len(cur_spacy_deps)))
main(cur_sent_data, cur_trace_data, cur_spacy_toks, cur_spacy_pos, cur_spacy_deps)
main_end = time.time() - main_begin
print('total time inside main: %s'% (time.strftime("%H:%M:%S", time.gmtime(main_end))))
# ## start job on partition of the sentence index
# split_size = int(len(sent_data)/args.NUM_JOBS)
# cur_partition = get_slice(list(range(len(sent_data))), split_size, (int(args.job_id)-1))
# print('job {} - start index {} end index {}'.format(args.job_id, cur_partition[0], cur_partition[-1]))
# if len(cur_partition)>=2:
# i, j = cur_partition[0], cur_partition[-1]
# main(sent_data[i:j+1], trace_data[i:j+1], spacy_toks[i:j+1], spacy_pos[i:j+1], spacy_deps[i:j+1])
# else:
# i = cur_partition[0]
# main(sent_data[i:], trace_data[i:], spacy_toks[i:], spacy_pos[i:], spacy_deps[i:])
'''
## To run this file:
## Create virtual environment on nlpgrid
python3 -m venv <path_for_virtual_environment>
## Reno example:
python3 -m venv ~/venv3/ ##becca's venv: giga
## Activate virtual environment
source <path_for_virtual_environment>/bin/activate
## Reno example:
source ~/venv3/bin/activate
e.g. ~/giga/bin/activate ##becca's venv: giga
## Install packages necessary
pip install nltk
pip install numpy
pip install tqdm
pip install torch==1.4.0
pip install transformers
pip install annoy
pip install faiss-gpu (see section on installing faiss for more info)
pip install sklearn
pip install -U sentence-transformers
pip install ndjson
pip install spacy
python3 -m spacy download en_core_web_lg
## confirm torch version
python3
>>>import torch
>>>print(torch.__version__) //should be 1.4.0
## installing faiss
to check which cuda version you have in nlpgrid
cat /usr/local/cuda/version.txt
for CPU version
conda install faiss-cpu -c pytorch
for GPU version
conda install faiss-gpu -c pytorch
conda install faiss-gpu cudatoolkit=8.0 -c pytorch # For CUDA8
conda install faiss-gpu cudatoolkit=9.0 -c pytorch # For CUDA9
conda install faiss-gpu cudatoolkit=10.0 -c pytorch # For CUDA10
for nlpgrid gpus
pip install faiss-gpu
## confirm faiss
python3
>>>import faiss
>>>import numpy as np
## confirm annoy
python3
>>>import annoy
>>>from annoy import AnnoyIndex
''' |
return cur_words, cur_embeds |
inflation.rs | use {
crate::cli::{CliCommand, CliCommandInfo, CliConfig, CliError, ProcessResult},
clap::{App, Arg, ArgMatches, SubCommand},
solana_clap_utils::{
input_parsers::{pubkeys_of, value_of},
input_validators::is_valid_pubkey,
keypair::*,
},
renec_cli_output::{
CliEpochRewardshMetadata, CliInflation, CliKeyedEpochReward, CliKeyedEpochRewards,
},
solana_client::rpc_client::RpcClient,
solana_remote_wallet::remote_wallet::RemoteWalletManager,
solana_sdk::{clock::Epoch, pubkey::Pubkey},
std::sync::Arc,
};
#[derive(Debug, PartialEq)]
pub enum InflationCliCommand {
Show,
Rewards(Vec<Pubkey>, Option<Epoch>),
}
pub trait InflationSubCommands {
fn inflation_subcommands(self) -> Self;
}
impl InflationSubCommands for App<'_, '_> {
fn inflation_subcommands(self) -> Self {
self.subcommand(
SubCommand::with_name("inflation")
.about("Show inflation information")
.subcommand(
SubCommand::with_name("rewards")
.about("Show inflation rewards for a set of addresses")
.arg(pubkey!(
Arg::with_name("addresses")
.value_name("ADDRESS")
.index(1)
.multiple(true)
.required(true),
"Address of account to query for rewards. "
))
.arg(
Arg::with_name("rewards_epoch")
.long("rewards-epoch")
.takes_value(true)
.value_name("EPOCH")
.help("Display rewards for specific epoch [default: latest epoch]"),
),
),
)
}
}
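// Hedged usage sketch (assumes the top-level binary registers these subcommands;
// `<cli>` below is a placeholder, not the actual binary name):
//   <cli> inflation                           -> InflationCliCommand::Show
//   <cli> inflation rewards <ADDRESS>... --rewards-epoch 300
//                                             -> InflationCliCommand::Rewards(addresses, Some(300))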
pub fn parse_inflation_subcommand(
matches: &ArgMatches<'_>,
_default_signer: &DefaultSigner,
_wallet_manager: &mut Option<Arc<RemoteWalletManager>>,
) -> Result<CliCommandInfo, CliError> {
let command = match matches.subcommand() {
("rewards", Some(matches)) => {
let addresses = pubkeys_of(matches, "addresses").unwrap();
let rewards_epoch = value_of(matches, "rewards_epoch");
InflationCliCommand::Rewards(addresses, rewards_epoch)
}
_ => InflationCliCommand::Show,
};
Ok(CliCommandInfo {
command: CliCommand::Inflation(command),
signers: vec![],
})
}
pub fn process_inflation_subcommand(
rpc_client: &RpcClient,
config: &CliConfig,
inflation_subcommand: &InflationCliCommand,
) -> ProcessResult {
match inflation_subcommand {
InflationCliCommand::Show => process_show(rpc_client, config),
InflationCliCommand::Rewards(ref addresses, rewards_epoch) => {
process_rewards(rpc_client, config, addresses, *rewards_epoch)
}
}
}
fn process_show(rpc_client: &RpcClient, config: &CliConfig) -> ProcessResult |
fn process_rewards(
rpc_client: &RpcClient,
config: &CliConfig,
addresses: &[Pubkey],
rewards_epoch: Option<Epoch>,
) -> ProcessResult {
let rewards = rpc_client
.get_inflation_reward(addresses, rewards_epoch)
.map_err(|err| {
if let Some(epoch) = rewards_epoch {
format!("Rewards not available for epoch {}", epoch)
} else {
format!("Rewards not available {}", err)
}
})?;
let epoch_schedule = rpc_client.get_epoch_schedule()?;
let mut epoch_rewards: Vec<CliKeyedEpochReward> = vec![];
let epoch_metadata = if let Some(Some(first_reward)) = rewards.iter().find(|&v| v.is_some()) {
let (epoch_start_time, epoch_end_time) =
crate::stake::get_epoch_boundary_timestamps(rpc_client, first_reward, &epoch_schedule)?;
for (reward, address) in rewards.iter().zip(addresses) {
let cli_reward = reward.as_ref().and_then(|reward| {
crate::stake::make_cli_reward(reward, epoch_start_time, epoch_end_time)
});
epoch_rewards.push(CliKeyedEpochReward {
address: address.to_string(),
reward: cli_reward,
});
}
let block_time = rpc_client.get_block_time(first_reward.effective_slot)?;
Some(CliEpochRewardshMetadata {
epoch: first_reward.epoch,
effective_slot: first_reward.effective_slot,
block_time,
})
} else {
None
};
let cli_rewards = CliKeyedEpochRewards {
epoch_metadata,
rewards: epoch_rewards,
};
Ok(config.output_format.formatted_string(&cli_rewards))
}
| {
let governor = rpc_client.get_inflation_governor()?;
let current_rate = rpc_client.get_inflation_rate()?;
let inflation = CliInflation {
governor,
current_rate,
};
Ok(config.output_format.formatted_string(&inflation))
} |
build.rs | /* Copyright (C) 2018 Olivier Goffart <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
use semver::Version;
fn main() {
eprintln!("cargo:warning={:?}", std::env::vars().collect::<Vec<_>>());
| let qt_include_path = std::env::var("DEP_QT_INCLUDE_PATH").unwrap();
let qt_library_path = std::env::var("DEP_QT_LIBRARY_PATH").unwrap();
let qt_version = std::env::var("DEP_QT_VERSION")
.unwrap()
.parse::<Version>()
.expect("Parsing Qt version failed");
let mut config = cpp_build::Config::new();
if cfg!(target_os = "macos") {
config.flag("-F");
config.flag(&qt_library_path);
}
if qt_version >= Version::new(6, 0, 0) {
config.flag_if_supported("-std=c++17");
config.flag_if_supported("/std:c++17");
}
config.include(&qt_include_path).build("src/lib.rs");
for minor in 7..=15 {
if qt_version >= Version::new(5, minor, 0) {
println!("cargo:rustc-cfg=qt_{}_{}", 5, minor);
}
}
let mut minor = 0;
while qt_version >= Version::new(6, minor, 0) {
println!("cargo:rustc-cfg=qt_{}_{}", 6, minor);
minor += 1;
}
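// e.g. building against Qt 5.12 emits qt_5_7 .. qt_5_12; against Qt 6.2 it
// emits qt_5_7 .. qt_5_15 plus qt_6_0 .. qt_6_2, usable as #[cfg(qt_5_12)].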
} | |
statsd_server.go | // Package agent is the running Sensu agent. Agents connect to a Sensu backend,
// register their presence, subscribe to check channels, download relevant
// check packages, execute checks, and send results to the Sensu backend via
// the Event channel.
package agent
import (
"context"
"fmt"
"strings"
"time"
"github.com/atlassian/gostatsd"
"github.com/atlassian/gostatsd/pkg/statsd"
"github.com/echlebek/sensu-lite/transport"
"github.com/echlebek/sensu-lite/types"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
"golang.org/x/time/rate"
)
// NewStatsdServer provides a new statsd server for the sensu-agent.
func NewStatsdServer(a *Agent) *statsd.Server {
c := a.config.StatsdServer
s := NewServer()
backend, err := NewClientFromViper(s.Viper, a)
if err != nil {
logger.WithError(err).Error("failed to create sensu-statsd backend")
}
s.Backends = []gostatsd.Backend{backend}
if c.FlushInterval == 0 {
logger.Error("invalid statsd flush interval of 0, using the default 10s")
c.FlushInterval = DefaultStatsdFlushInterval
}
s.FlushInterval = time.Duration(c.FlushInterval) * time.Second
s.MetricsAddr = fmt.Sprintf("%s:%d", c.Host, c.Port)
s.StatserType = statsd.StatserNull
return s
}
// NewServer will create a new statsd Server with the default configuration.
func NewServer() *statsd.Server {
return &statsd.Server{
Backends: []gostatsd.Backend{},
Limiter: rate.NewLimiter(statsd.DefaultMaxCloudRequests, statsd.DefaultBurstCloudRequests),
InternalTags: statsd.DefaultInternalTags,
InternalNamespace: statsd.DefaultInternalNamespace,
DefaultTags: statsd.DefaultTags,
ExpiryInterval: statsd.DefaultExpiryInterval,
FlushInterval: statsd.DefaultFlushInterval,
MaxReaders: statsd.DefaultMaxReaders,
MaxParsers: statsd.DefaultMaxParsers,
MaxWorkers: statsd.DefaultMaxWorkers,
MaxQueueSize: statsd.DefaultMaxQueueSize,
MaxConcurrentEvents: statsd.DefaultMaxConcurrentEvents,
EstimatedTags: statsd.DefaultEstimatedTags,
MetricsAddr: statsd.DefaultMetricsAddr,
PercentThreshold: statsd.DefaultPercentThreshold,
IgnoreHost: statsd.DefaultIgnoreHost,
ConnPerReader: statsd.DefaultConnPerReader,
HeartbeatEnabled: statsd.DefaultHeartbeatEnabled,
ReceiveBatchSize: statsd.DefaultReceiveBatchSize,
CacheOptions: statsd.CacheOptions{
CacheRefreshPeriod: statsd.DefaultCacheRefreshPeriod,
CacheEvictAfterIdlePeriod: statsd.DefaultCacheEvictAfterIdlePeriod,
CacheTTL: statsd.DefaultCacheTTL,
CacheNegativeTTL: statsd.DefaultCacheNegativeTTL,
},
Viper: viper.New(),
}
}
// BackendName is the name of this statsd backend.
const BackendName = "sensu-statsd"
// Client is an object that is used to send messages to sensu-statsd.
type Client struct {
agent *Agent
}
// NewClientFromViper constructs a sensu-statsd backend.
func NewClientFromViper(v *viper.Viper, a *Agent) (gostatsd.Backend, error) {
return NewClient(a)
}
// NewClient constructs a sensu-statsd backend.
func NewClient(a *Agent) (*Client, error) {
return &Client{agent: a}, nil
}
// SendMetricsAsync flushes the metrics to the statsd backend which resides on
// the sensu-agent, preparing payload synchronously but doing the send asynchronously.
// Must not read/write MetricMap asynchronously.
func (c Client) SendMetricsAsync(ctx context.Context, metrics *gostatsd.MetricMap, cb gostatsd.SendCallback) {
now := time.Now().UnixNano()
metricsPoints := prepareMetrics(now, metrics)
go func() {
cb([]error{c.sendMetrics(metricsPoints)})
}()
}
// SendEvent sends event to the statsd backend which resides on the sensu-agent,
// not to be confused with the sensu-backend.
func (Client) SendEvent(ctx context.Context, e *gostatsd.Event) (retErr error) {
logger.WithField("event", e).Info("statsd received an event")
return nil
}
// Name returns the name of the backend.
func (Client) Name() string {
return BackendName
}
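// prepareMetrics flattens each gostatsd aggregate into Sensu MetricPoints with
// dotted name suffixes, e.g. a counter "foo" becomes "foo.value" and
// "foo.per_second" (see composeCounterPoints below).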
func prepareMetrics(now int64, metrics *gostatsd.MetricMap) []*types.MetricPoint {
var metricsPoints []*types.MetricPoint
metrics.Counters.Each(func(key, tagsKey string, counter gostatsd.Counter) {
tags := composeMetricTags(tagsKey)
counters := composeCounterPoints(counter, key, tags, now)
metricsPoints = append(metricsPoints, counters...)
})
metrics.Timers.Each(func(key, tagsKey string, timer gostatsd.Timer) {
tags := composeMetricTags(tagsKey)
timers := composeTimerPoints(timer, key, tags, now)
metricsPoints = append(metricsPoints, timers...)
})
metrics.Gauges.Each(func(key, tagsKey string, gauge gostatsd.Gauge) {
tags := composeMetricTags(tagsKey)
gauges := composeGaugePoints(gauge, key, tags, now)
metricsPoints = append(metricsPoints, gauges...)
})
metrics.Sets.Each(func(key, tagsKey string, set gostatsd.Set) {
tags := composeMetricTags(tagsKey)
sets := composeSetPoints(set, key, tags, now)
metricsPoints = append(metricsPoints, sets...)
})
return metricsPoints
}
func (c Client) sendMetrics(points []*types.MetricPoint) (retErr error) {
if points == nil {
return nil
}
metrics := &types.Metrics{
Points: points,
Handlers: c.agent.config.StatsdServer.Handlers,
}
event := &types.Event{
Entity: c.agent.getAgentEntity(),
Timestamp: time.Now().Unix(),
Metrics: metrics,
}
msg, err := c.agent.marshal(event)
if err != nil {
logger.WithError(err).Error("error marshaling metric event")
return err
}
logger.WithFields(logrus.Fields{
"metrics": event.Metrics,
"entity": event.Entity.Name,
}).Debug("sending statsd metrics")
tm := &transport.Message{
Type: transport.MessageTypeEvent,
Payload: msg,
}
c.agent.sendMessage(tm)
return nil
}
func composeMetricTags(tagsKey string) []*types.MetricTag {
tagsKeys := strings.Split(tagsKey, ",")
var tags []*types.MetricTag
var name, value string
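// Each entry is expected to look like "name:value"; an entry without a colon
// silently reuses the name/value parsed on a previous iteration (existing
// behavior, noted here for clarity).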
for _, tag := range tagsKeys {
tagsValues := strings.Split(tag, ":")
if len(tagsValues) > 1 {
name = tagsValues[0]
value = tagsValues[1]
}
if tag != "" {
t := &types.MetricTag{
Name: name,
Value: value,
}
tags = append(tags, t)
}
}
return tags
}
func composeCounterPoints(counter gostatsd.Counter, key string, tags []*types.MetricTag, now int64) []*types.MetricPoint {
m0 := &types.MetricPoint{
Name: key + ".value",
Value: float64(counter.Value),
Timestamp: now,
Tags: tags,
}
m1 := &types.MetricPoint{
Name: key + ".per_second",
Value: float64(counter.PerSecond),
Timestamp: now,
Tags: tags,
}
points := []*types.MetricPoint{m0, m1}
return points
}
func composeTimerPoints(timer gostatsd.Timer, key string, tags []*types.MetricTag, now int64) []*types.MetricPoint {
m0 := &types.MetricPoint{
Name: key + ".min",
Value: timer.Min,
Timestamp: now,
Tags: tags,
}
m1 := &types.MetricPoint{
Name: key + ".max",
Value: timer.Max,
Timestamp: now,
Tags: tags, | m2 := &types.MetricPoint{
Name: key + ".count",
Value: float64(timer.Count),
Timestamp: now,
Tags: tags,
}
m3 := &types.MetricPoint{
Name: key + ".per_second",
Value: timer.PerSecond,
Timestamp: now,
Tags: tags,
}
m4 := &types.MetricPoint{
Name: key + ".mean",
Value: timer.Mean,
Timestamp: now,
Tags: tags,
}
m5 := &types.MetricPoint{
Name: key + ".median",
Value: timer.Median,
Timestamp: now,
Tags: tags,
}
m6 := &types.MetricPoint{
Name: key + ".stddev",
Value: timer.StdDev,
Timestamp: now,
Tags: tags,
}
m7 := &types.MetricPoint{
Name: key + ".sum",
Value: timer.Sum,
Timestamp: now,
Tags: tags,
}
m8 := &types.MetricPoint{
Name: key + ".sum_squares",
Value: timer.SumSquares,
Timestamp: now,
Tags: tags,
}
points := []*types.MetricPoint{m0, m1, m2, m3, m4, m5, m6, m7, m8}
for _, pct := range timer.Percentiles {
m := &types.MetricPoint{
Name: key + ".percentile_" + pct.Str,
Value: pct.Float,
Timestamp: now,
Tags: tags,
}
points = append(points, m)
}
return points
}
func composeGaugePoints(gauge gostatsd.Gauge, key string, tags []*types.MetricTag, now int64) []*types.MetricPoint {
m0 := &types.MetricPoint{
Name: key + ".value",
Value: float64(gauge.Value),
Timestamp: now,
Tags: tags,
}
points := []*types.MetricPoint{m0}
return points
}
func composeSetPoints(set gostatsd.Set, key string, tags []*types.MetricTag, now int64) []*types.MetricPoint {
m0 := &types.MetricPoint{
Name: key + ".value",
Value: float64(len(set.Values)),
Timestamp: now,
Tags: tags,
}
points := []*types.MetricPoint{m0}
return points
} | } |
nodes.js | class Token {
constructor(type, value, line) {
this.type = type;
this.value = value;
this.line = line;
}
}
class Node {
constructor(tag, line) {
this.tag = tag;
this.line = line;
}
}
class Primitive {
constructor(implementation) {
this.tag = "primitive";
this.implementation = implementation;
}
}
// Includes variable, operator
class VariableNode extends Node {
constructor(name, type, line) {
super("variable", line);
this.name = name;
this.type = type;
}
}
class ConstantNode extends Node {
constructor(value, line) {
super("constant", line);
this.value = value;
}
}
class ApplicationNode extends Node {
constructor(line) {
super("application", line);
}
setOperator(value) {
this.operator = value;
}
setOperands(value) {
this.operands = value;
}
}
class AssignmentNode extends Node {
constructor(line) {
super("assignment", line);
this.returnLeft = false;
}
setLeft(value) {
this.left = value;
}
setRight(value) {
this.right = value;
}
setReturnLeft() {
this.returnLeft = true;
}
}
class VarDefNode extends Node {
constructor(line) {
super("var_definition", line);
}
setLeft(value) {
this.left = value;
}
setRight(value) {
this.right = value;
}
}
class ReturnNode extends Node {
constructor(line, expression) {
super("return", line);
this.expression = expression;
}
}
class RangeNode extends Node {
constructor(line) {
super("range", line);
this.closed = false;
}
setFrom(value) {
this.from = value;
}
setTo(value) {
this.to = value;
}
setClosed() {
this.closed = true;
}
}
class FuncDefNode extends Node {
constructor(line) {
super('function_definition', line);
this.is_class = false;
}
setName(value) {
this.name = value;
}
setParent(value) {
this.parent = value;
}
setParameters(value) {
this.parameters = value;
}
setBody(value) {
this.body = value;
}
setClass() {
this.is_class = true;
}
}
class CaseNode extends Node {
constructor(line) {
super("case", line);
}
setValue(value) {
this.value = value;
}
setStmt(value) {
this.stmt = value;
}
}
class SwitchNode extends Node {
constructor(line) {
super("switch", line);
}
setVariable(value) {
this.variable = value;
}
setCases(value) {
this.cases = value;
}
setDefault(value) {
this.default = value;
}
}
class ConditionNode extends Node {
constructor(type, line) {
super(type, line);
}
setPredicate(value) {
this.predicate = value;
}
setConsequent(value) {
this.consequent = value;
}
setAlternative(value) {
this.alternative = value;
}
}
class IfNode extends ConditionNode {
constructor(line) {
super('if', line);
}
}
class WhileNode extends ConditionNode {
constructor(line) {
super('while', line);
}
}
class DoWhileNode extends ConditionNode {
constructor(line) {
super('do', line);
}
}
class ForNode extends Node {
constructor(line) {
super("for", line);
}
setVariable(value) {
this.variable = value;
}
setRange(value) {
this.range = value;
}
setIncrement(value) {
this.increment = value;
}
setConsequent(value) {
this.consequent = value;
}
}
class ContinueNode extends Node {
constructor(line) {
super("continue", line);
}
}
class BreakNode extends Node {
constructor(line) {
super("break", line);
}
}
class FallthroughNode extends Node {
constructor(line) { |
class ContinueValue extends Node {
constructor(line) {
super("continue_value", line);
}
}
class BreakValue extends Node {
constructor(line) {
super("break_value", line);
}
}
class FallthroughValue extends Node {
constructor(line) {
super("fallthrough_value", line);
}
}
class ReturnValue extends Node {
constructor(content, line) {
super("return_value", line);
this.content = content;
}
}
class FunctionValue extends Node {
constructor(name, parameters, body, env, hasParent, isClass, line) {
super("function_value", line);
this.name = name;
this.parameters = parameters;
this.body = body;
this.environment = env;
this.has_parent = hasParent;
this.is_class = isClass;
this.line = line;
}
} | super("fallthrough", line);
}
} |
data_loader.py | import torch
from torchvision import datasets, transforms
import os
transform = {
"train": transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
[0.4914, 0.4821, 0.4465], [0.2470, 0.2435, 0.2616]
),
]
),
"val": transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
[0.4940, 0.4849, 0.4502], [0.2467, 0.2430, 0.2616]
),
]
),
}
def | (root, batch_size, num_workers):
dataset = {
x: datasets.ImageFolder(os.path.join(root, x), transform=transform[x])
for x in ["train", "val"]
}
data_loader = {
x: torch.utils.data.DataLoader(
dataset[x], batch_size=batch_size, shuffle=(x == "train"),
num_workers=num_workers,
)
for x in ["train", "val"]
}
dataset_size = {x: len(dataset[x]) for x in ["train", "val"]}
return data_loader, dataset_size
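# Hedged usage sketch (assumes an ImageFolder layout root/{train,val}/<class>/*.jpg
# and the loader builder defined above):
# data_loader, dataset_size = get_loader("data/", batch_size=32, num_workers=4)
# for images, labels in data_loader["train"]:
#     ...  # training step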
def CIFAR10(batch_size, root="data/"):
dataset = {
x: datasets.CIFAR10(
root, train=(x == "train"), download=True, transform=transform[x]
)
for x in ["train", "val"]
}
data_loader = {
x: torch.utils.data.DataLoader(
dataset[x], batch_size=batch_size, shuffle=(x == "train")
)
for x in ["train", "val"]
}
dataset_size = {x: len(dataset[x]) for x in ["train", "val"]}
return data_loader, dataset_size
| get_loader |
_validate.py | # coding=utf-8
"""
Verifies that all configuration values have a valid setting
"""
from elib_config._setup import ELIBConfig
# noinspection PyProtectedMember
from elib_config._value._config_value import ConfigValue
# noinspection PyProtectedMember
from elib_config._value._exc import DuplicateConfigValueError, MissingValueError
def | (raise_=True):
"""
Verifies that all configuration values have a valid setting
"""
ELIBConfig.check()
known_paths = set()
duplicate_values = set()
missing_values = set()
for config_value in ConfigValue.config_values:
if config_value.path not in known_paths:
known_paths.add(config_value.path)
else:
duplicate_values.add(config_value.name)
try:
config_value()
except MissingValueError:
missing_values.add(config_value.name)
if raise_ and duplicate_values:
raise DuplicateConfigValueError(str(duplicate_values))
if raise_ and missing_values:
raise MissingValueError(str(missing_values), 'missing config value(s)')
return duplicate_values, missing_values
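# Hedged usage sketch: call after every ConfigValue has been declared; with
# raise_=False the offending value names are returned instead of raising:
# duplicates, missing = validate_config(raise_=False)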
| validate_config |
update_snapshot_plan_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package operations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/Yuyz0112/cloudtower-go-sdk/models"
)
// UpdateSnapshotPlanReader is a Reader for the UpdateSnapshotPlan structure.
type UpdateSnapshotPlanReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *UpdateSnapshotPlanReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewUpdateSnapshotPlanOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewUpdateSnapshotPlanBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewUpdateSnapshotPlanOK creates a UpdateSnapshotPlanOK with default headers values
func NewUpdateSnapshotPlanOK() *UpdateSnapshotPlanOK |
/* UpdateSnapshotPlanOK describes a response with status code 200, with default header values.
Ok
*/
type UpdateSnapshotPlanOK struct {
Payload []*models.WithTaskSnapshotPlan
}
func (o *UpdateSnapshotPlanOK) Error() string {
return fmt.Sprintf("[POST /update-snapshot-plan][%d] updateSnapshotPlanOK %+v", 200, o.Payload)
}
func (o *UpdateSnapshotPlanOK) GetPayload() []*models.WithTaskSnapshotPlan {
return o.Payload
}
func (o *UpdateSnapshotPlanOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewUpdateSnapshotPlanBadRequest creates a UpdateSnapshotPlanBadRequest with default headers values
func NewUpdateSnapshotPlanBadRequest() *UpdateSnapshotPlanBadRequest {
return &UpdateSnapshotPlanBadRequest{}
}
/* UpdateSnapshotPlanBadRequest describes a response with status code 400, with default header values.
UpdateSnapshotPlanBadRequest update snapshot plan bad request
*/
type UpdateSnapshotPlanBadRequest struct {
Payload string
}
func (o *UpdateSnapshotPlanBadRequest) Error() string {
return fmt.Sprintf("[POST /update-snapshot-plan][%d] updateSnapshotPlanBadRequest %+v", 400, o.Payload)
}
func (o *UpdateSnapshotPlanBadRequest) GetPayload() string {
return o.Payload
}
func (o *UpdateSnapshotPlanBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| {
return &UpdateSnapshotPlanOK{}
} |
xiv_proxy.py | # Copyright (c) 2016 IBM Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import datetime
import re
import six
import socket
from oslo_log import log as logging
from oslo_utils import importutils
pyxcli = importutils.try_import("pyxcli")
if pyxcli:
from pyxcli import client
from pyxcli import errors
from pyxcli.events import events
from pyxcli.mirroring import mirrored_entities
from pyxcli import transports
from cinder import context
from cinder.i18n import _
from cinder.objects import fields
from cinder import volume as c_volume
import cinder.volume.drivers.ibm.ibm_storage as storage
from cinder.volume.drivers.ibm.ibm_storage import certificate
from cinder.volume.drivers.ibm.ibm_storage import cryptish
from cinder.volume.drivers.ibm.ibm_storage import proxy
from cinder.volume.drivers.ibm.ibm_storage import strings
from cinder.volume.drivers.ibm.ibm_storage import xiv_replication as repl
from cinder.volume import group_types
from cinder.volume import qos_specs
from cinder.volume import utils
from cinder.volume import volume_types
OPENSTACK_PRODUCT_NAME = "OpenStack"
PERF_CLASS_NAME_PREFIX = "cinder-qos"
HOST_BAD_NAME = "HOST_BAD_NAME"
VOLUME_IS_MAPPED = "VOLUME_IS_MAPPED"
CONNECTIONS_PER_MODULE = 2
MIN_LUNID = 1
MAX_LUNID = 511
SYNC = 'sync'
ASYNC = 'async'
SYNC_TIMEOUT = 300
SYNCHED_STATES = ['synchronized', 'rpo ok']
PYXCLI_VERSION = '1.1.6'
LOG = logging.getLogger(__name__)
# performance class strings - used in exceptions
PERF_CLASS_ERROR = _("Unable to create or get performance class: %(details)s")
PERF_CLASS_ADD_ERROR = _("Unable to add volume to performance class: "
"%(details)s")
PERF_CLASS_VALUES_ERROR = _("A performance class with the same name but "
"different values exists: %(details)s")
# setup strings - used in exceptions
SETUP_BASE_ERROR = _("Unable to connect to %(title)s: %(details)s")
SETUP_INVALID_ADDRESS = _("Unable to connect to the storage system "
"at '%(address)s', invalid address.")
# create volume strings - used in exceptions
CREATE_VOLUME_BASE_ERROR = _("Unable to create volume: %(details)s")
# initialize connection strings - used in exceptions
CONNECTIVITY_FC_NO_TARGETS = _("Unable to detect FC connection between the "
"compute host and the storage, please ensure "
"that zoning is set up correctly.")
# terminate connection strings - used in logging
TERMINATE_CONNECTION_BASE_ERROR = ("Unable to terminate the connection "
"for volume '%(volume)s': %(error)s.")
TERMINATE_CONNECTION_HOST_ERROR = ("Terminate connection for volume "
"'%(volume)s': for volume '%(volume)s': "
"%(host)s %(error)s.")
# delete volume strings - used in logging
DELETE_VOLUME_BASE_ERROR = ("Unable to delete volume '%(volume)s': "
"%(error)s.")
# manage volume strings - used in exceptions
MANAGE_VOLUME_BASE_ERROR = _("Unable to manage the volume '%(volume)s': "
"%(error)s.")
INCOMPATIBLE_PYXCLI = _('Incompatible pyxcli found. Minimum: %(required)s '
'Found: %(found)s')
class XIVProxy(proxy.IBMStorageProxy):
"""Proxy between the Cinder Volume and Spectrum Accelerate Storage.
Supports IBM XIV, Spectrum Accelerate, A9000, A9000R
Version: 2.3.0
Required pyxcli version: 1.1.6
.. code:: text
2.0 - First open source driver version
2.1.0 - Support Consistency groups through Generic volume groups
- Support XIV/A9000 Volume independent QoS
- Support groups replication
2.3.0 - Support Report backend state
"""
def __init__(self, storage_info, logger, exception,
driver=None, active_backend_id=None, host=None):
"""Initialize Proxy."""
if not active_backend_id:
active_backend_id = strings.PRIMARY_BACKEND_ID
proxy.IBMStorageProxy.__init__(
self, storage_info, logger, exception, driver, active_backend_id)
LOG.info("__init__: storage_info: %(keys)s",
{'keys': self.storage_info})
if active_backend_id:
LOG.info("__init__: active_backend_id: %(id)s",
{'id': active_backend_id})
self.ibm_storage_cli = None
self.meta['ibm_storage_portal'] = None
self.meta['ibm_storage_iqn'] = None
self.ibm_storage_remote_cli = None
self.meta['ibm_storage_fc_targets'] = []
self.meta['storage_version'] = None
self.system_id = None
@proxy._trace_time
def setup(self, context):
msg = ''
if pyxcli:
if pyxcli.version < PYXCLI_VERSION:
msg = (INCOMPATIBLE_PYXCLI %
{'required': PYXCLI_VERSION,
'found': pyxcli.version
})
else:
msg = (SETUP_BASE_ERROR %
{'title': strings.TITLE,
'details': "IBM Python XCLI Client (pyxcli) not found"
})
if msg != '':
LOG.error(msg)
raise self._get_exception()(msg)
"""Connect ssl client."""
LOG.info("Setting up connection to %(title)s...\n"
"Active backend_id: '%(id)s'.",
{'title': strings.TITLE,
'id': self.active_backend_id})
self.ibm_storage_cli = self._init_xcli(self.active_backend_id)
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI:
self.meta['ibm_storage_iqn'] = (
self._call_xiv_xcli("config_get").
as_dict('name')['iscsi_name'].value)
portals = storage.get_online_iscsi_ports(self.ibm_storage_cli)
if len(portals) == 0:
msg = (SETUP_BASE_ERROR %
{'title': strings.TITLE,
'details': "No iSCSI portals available on the Storage."})
raise self._get_exception()(
_("%(prefix)s %(portals)s") %
{'prefix': storage.XIV_LOG_PREFIX,
'portals': msg})
self.meta['ibm_storage_portal'] = "%s:3260" % portals[:1][0]
remote_id = self._get_secondary_backend_id()
if remote_id:
self.ibm_storage_remote_cli = self._init_xcli(remote_id)
self._event_service_start()
self._get_pool()
LOG.info("IBM Storage %(common_ver)s "
"xiv_proxy %(proxy_ver)s. ",
{'common_ver': self.full_version,
'proxy_ver': self.full_version})
self._update_system_id()
if remote_id:
self._update_active_schedule_objects()
self._update_remote_schedule_objects()
LOG.info("Connection to the IBM storage "
"system established successfully.")
@proxy._trace_time
def _update_active_schedule_objects(self):
"""Set schedule objects on active backend.
The value 00:00:20 is covered in XIV by a pre-defined object named
min_interval.
"""
schedules = self._call_xiv_xcli("schedule_list").as_dict('name')
for rate in repl.Replication.async_rates:
if rate.schedule == '00:00:20':
continue
name = rate.schedule_name
schedule = schedules.get(name, None)
if schedule:
LOG.debug('Exists on local backend %(sch)s', {'sch': name})
interval = schedule.get('interval', '')
if interval != rate.schedule:
msg = (_("Schedule %(sch)s exists with incorrect "
"value %(int)s")
% {'sch': name, 'int': interval})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
else:
LOG.debug('create %(sch)s', {'sch': name})
try:
self._call_xiv_xcli("schedule_create",
schedule=name, type='interval',
interval=rate.schedule)
except errors.XCLIError:
msg = (_("Setting up Async mirroring failed, "
"schedule %(sch)s is not supported on system: "
" %(id)s.")
% {'sch': name, 'id': self.system_id})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
@proxy._trace_time
def _update_remote_schedule_objects(self):
"""Set schedule objects on remote backend.
The value 00:00:20 is covered in XIV by a pre-defined object named
min_interval.
"""
schedules = self._call_remote_xiv_xcli("schedule_list").as_dict('name')
for rate in repl.Replication.async_rates:
if rate.schedule == '00:00:20':
continue
name = rate.schedule_name
if schedules.get(name, None):
LOG.debug('Exists on remote backend %(sch)s', {'sch': name})
interval = schedules.get(name, None)['interval']
if interval != rate.schedule:
msg = (_("Schedule %(sch)s exists with incorrect "
"value %(int)s")
% {'sch': name, 'int': interval})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
else:
try:
self._call_remote_xiv_xcli("schedule_create",
schedule=name, type='interval',
interval=rate.schedule)
except errors.XCLIError:
msg = (_("Setting up Async mirroring failed, "
"schedule %(sch)s is not supported on system: "
" %(id)s.")
% {'sch': name, 'id': self.system_id})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
def _get_extra_specs(self, type_id):
"""get extra specs to match the type_id
type_id can derive from volume or from consistency_group
"""
if type_id is None:
return {}
return c_volume.volume_types.get_volume_type_extra_specs(type_id)
def _update_system_id(self):
if self.system_id:
return
local_ibm_storage_cli = self._init_xcli(strings.PRIMARY_BACKEND_ID)
if not local_ibm_storage_cli:
LOG.error('Failed to connect to main backend. '
'Cannot retrieve main backend system_id')
return
system_id = local_ibm_storage_cli.cmd.config_get().as_dict(
'name')['system_id'].value
LOG.debug('system_id: %(id)s', {'id': system_id})
self.system_id = system_id
@proxy._trace_time
def _get_qos_specs(self, type_id):
"""Gets the qos specs from cinder."""
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
if not volume_type:
return None
qos_specs_id = volume_type.get('qos_specs_id', None)
if qos_specs_id:
return qos_specs.get_qos_specs(
ctxt, qos_specs_id).get('specs', None)
return None
@proxy._trace_time
def _qos_create_kwargs_for_xcli(self, specs):
args = {}
for key in specs:
if key == 'bw':
args['max_bw_rate'] = specs[key]
if key == 'iops':
args['max_io_rate'] = specs[key]
return args
def _qos_remove_vol(self, volume):
try:
self._call_xiv_xcli("perf_class_remove_vol",
vol=volume['name'])
except errors.VolumeNotConnectedToPerfClassError as e:
details = self._get_code_and_status_or_message(e)
LOG.debug(details)
return True
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg_data = (_("Unable to add volume to performance "
"class: %(details)s") % {'details': details})
LOG.error(msg_data)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg_data)
return True
def _qos_add_vol(self, volume, perf_class_name):
try:
self._call_xiv_xcli("perf_class_add_vol",
vol=volume['name'],
perf_class=perf_class_name)
except errors.VolumeAlreadyInPerfClassError as e:
details = self._get_code_and_status_or_message(e)
LOG.debug(details)
return True
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg = PERF_CLASS_ADD_ERROR % {'details': details}
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
return True
def _check_perf_class_on_backend(self, specs):
"""Checking if class exists on backend. if not - create it."""
perf_class_name = PERF_CLASS_NAME_PREFIX
if specs is None or specs == {}:
return ''
for key, value in sorted(specs.items()):
perf_class_name += '_' + key + '_' + value
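# e.g. specs {'bw': '100', 'iops': '5000'} builds the class name
# "cinder-qos_bw_100_iops_5000"; sorting the keys keeps the name deterministic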
try:
classes_list = self._call_xiv_xcli("perf_class_list",
perf_class=perf_class_name
).as_list
# list is not empty, check if class has the right values
for perf_class in classes_list:
if (not perf_class.get('max_iops',
None) == specs.get('iops', '0') or
not perf_class.get('max_bw',
None) == specs.get('bw', '0')):
raise self.meta['exception'].VolumeBackendAPIException(
data=PERF_CLASS_VALUES_ERROR %
{'details': perf_class_name})
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg = PERF_CLASS_ERROR % {'details': details}
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
# class does not exist, create it
if not classes_list:
self._create_qos_class(perf_class_name, specs)
return perf_class_name
def _get_type_from_perf_class_name(self, perf_class_name):
_type = re.findall('type_(independent|shared)', perf_class_name)
return _type[0] if _type else None
def _create_qos_class(self, perf_class_name, specs):
"""Create the qos class on the backend."""
try:
# check if we have a shared (default) perf class
# or an independent perf class
_type = self._get_type_from_perf_class_name(perf_class_name)
if _type:
self._call_xiv_xcli("perf_class_create",
perf_class=perf_class_name,
type=_type)
else:
self._call_xiv_xcli("perf_class_create",
perf_class=perf_class_name)
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg = PERF_CLASS_ERROR % {'details': details}
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
try:
args = self._qos_create_kwargs_for_xcli(specs)
self._call_xiv_xcli("perf_class_set_rate",
perf_class=perf_class_name,
**args)
return perf_class_name
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
# attempt to clean up
self._call_xiv_xcli("perf_class_delete",
perf_class=perf_class_name)
msg = PERF_CLASS_ERROR % {'details': details}
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
def _qos_specs_from_volume(self, volume):
"""Returns qos_specs of volume.
checks if there is a type on the volume
if so, checks if it has been associated with a qos class
returns the name of that class
"""
type_id = volume.get('volume_type_id', None)
if not type_id:
return None
return self._get_qos_specs(type_id)
def _get_replication_info(self, specs):
info, msg = repl.Replication.extract_replication_info_from_specs(specs)
if not info:
LOG.error(msg)
raise self._get_exception()(message=msg)
return info
@proxy._trace_time
def _create_volume(self, volume):
"""Internal implementation to create a volume."""
size = storage.gigabytes_to_blocks(float(volume['size']))
pool = self._get_backend_pool()
try:
self._call_xiv_xcli(
"vol_create", vol=volume['name'], size_blocks=size, pool=pool)
except errors.SystemOutOfSpaceError:
msg = _("Unable to create volume: System is out of space.")
LOG.error(msg)
raise self._get_exception()(msg)
except errors.PoolOutOfSpaceError:
msg = (_("Unable to create volume: pool '%(pool)s' is "
"out of space.")
% {'pool': pool})
LOG.error(msg)
raise self._get_exception()(msg)
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg = (CREATE_VOLUME_BASE_ERROR % {'details': details})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
@proxy._trace_time
def create_volume(self, volume):
"""Creates a volume."""
# read replication information
specs = self._get_extra_specs(volume.get('volume_type_id', None))
replication_info = self._get_replication_info(specs)
self._create_volume(volume)
return self.handle_created_vol_properties(replication_info,
volume)
def handle_created_vol_properties(self, replication_info, volume):
volume_update = {}
LOG.debug('checking replication_info %(rep)s',
{'rep': replication_info})
volume_update['replication_status'] = 'disabled'
cg = volume.group and utils.is_group_a_cg_snapshot_type(volume.group)
if replication_info['enabled']:
try:
repl.VolumeReplication(self).create_replication(
volume.name, replication_info)
except Exception as e:
details = self._get_code_and_status_or_message(e)
msg = ('Failed create_replication for '
'volume %(vol)s: %(err)s'
% {'vol': volume['name'], 'err': details})
LOG.error(msg)
if cg:
cg_name = self._cg_name_from_volume(volume)
self._silent_delete_volume_from_cg(volume, cg_name)
self._silent_delete_volume(volume=volume)
raise
volume_update['replication_status'] = 'enabled'
if cg:
if volume.group.is_replicated:
# for replicated Consistency Group:
# The Volume must be mirrored, and its mirroring settings must
# be identical to those of the Consistency Group:
# mirroring type (e.g., synchronous),
# mirroring status, mirroring target(backend)
group_specs = group_types.get_group_type_specs(
volume.group.group_type_id)
group_rep_info = self._get_replication_info(group_specs)
msg = None
if volume_update['replication_status'] != 'enabled':
msg = ('Cannot add non-replicated volume into'
' replicated group')
elif replication_info['mode'] != group_rep_info['mode']:
msg = ('Volume replication type and Group replication type'
' should be the same')
elif volume.host != volume.group.host:
msg = 'Cannot add volume to Group on different host'
elif volume.group['replication_status'] == 'enabled':
# if group is mirrored and enabled, compare state.
group_name = self._cg_name_from_group(volume.group)
me = mirrored_entities.MirroredEntities(
self.ibm_storage_cli)
me_objs = me.get_mirror_resources_by_name_map()
vol_obj = me_objs['volumes'][volume.name]
vol_sync_state = vol_obj['sync_state']
cg_sync_state = me_objs['cgs'][group_name]['sync_state']
if (vol_sync_state != 'Synchronized' or
cg_sync_state != 'Synchronized'):
msg = ('Cannot add volume to Group. Both volume and '
'group should have sync_state = Synchronized')
if msg:
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
try:
cg_name = self._cg_name_from_volume(volume)
self._call_xiv_xcli(
"cg_add_vol", vol=volume['name'], cg=cg_name)
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
self._silent_delete_volume(volume=volume)
msg = (CREATE_VOLUME_BASE_ERROR % {'details': details})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
perf_class_name = None
specs = self._qos_specs_from_volume(volume)
if specs:
try:
perf_class_name = self._check_perf_class_on_backend(specs)
if perf_class_name:
self._call_xiv_xcli("perf_class_add_vol",
vol=volume['name'],
perf_class=perf_class_name)
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
if cg:
cg_name = self._cg_name_from_volume(volume)
self._silent_delete_volume_from_cg(volume, cg_name)
self._silent_delete_volume(volume=volume)
msg = PERF_CLASS_ADD_ERROR % {'details': details}
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
return volume_update
@proxy._trace_time
def enable_replication(self, context, group, volumes):
"""Enable cg replication"""
# fetch replication info
group_specs = group_types.get_group_type_specs(group.group_type_id)
if not group_specs:
msg = 'No group specs inside group type'
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
# Add this field to adjust it to generic replication (for volumes)
replication_info = self._get_replication_info(group_specs)
if utils.is_group_a_cg_snapshot_type(group):
# take every vol out of cg - we can't mirror the cg otherwise.
if volumes:
self._update_consistencygroup(context, group,
remove_volumes=volumes)
for volume in volumes:
enabled_status = fields.ReplicationStatus.ENABLED
if volume['replication_status'] != enabled_status:
repl.VolumeReplication(self).create_replication(
volume.name, replication_info)
# mirror entire group
group_name = self._cg_name_from_group(group)
try:
self._create_consistencygroup_on_remote(context, group_name)
except errors.CgNameExistsError:
LOG.debug("CG name %(cg)s exists, no need to open it on "
"secondary backend.", {'cg': group_name})
repl.GroupReplication(self).create_replication(group_name,
replication_info)
updated_volumes = []
if volumes:
# add volumes back to cg
self._update_consistencygroup(context, group,
add_volumes=volumes)
for volume in volumes:
updated_volumes.append(
{'id': volume['id'],
'replication_status':
fields.ReplicationStatus.ENABLED})
return ({'replication_status': fields.ReplicationStatus.ENABLED},
updated_volumes)
else:
# For generic groups we replicate all the volumes
updated_volumes = []
for volume in volumes:
repl.VolumeReplication(self).create_replication(
volume.name, replication_info)
# update status
for volume in volumes:
updated_volumes.append(
{'id': volume['id'],
'replication_status': fields.ReplicationStatus.ENABLED})
return ({'replication_status': fields.ReplicationStatus.ENABLED},
updated_volumes)
@proxy._trace_time
def disable_replication(self, context, group, volumes):
"""disables CG replication"""
group_specs = group_types.get_group_type_specs(group.group_type_id)
if not group_specs:
msg = 'No group specs inside group type'
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
replication_info = self._get_replication_info(group_specs)
updated_volumes = []
if utils.is_group_a_cg_snapshot_type(group):
# one call deletes replication for cgs and volumes together.
group_name = self._cg_name_from_group(group)
repl.GroupReplication(self).delete_replication(group_name,
replication_info)
for volume in volumes:
# xiv locks volumes after deletion of replication.
# we need to unlock it for further use.
try:
self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name)
self.ibm_storage_remote_cli.cmd.vol_unlock(
vol=volume.name)
self.ibm_storage_remote_cli.cmd.cg_remove_vol(
vol=volume.name)
except errors.VolumeBadNameError:
LOG.debug("Failed to delete vol %(vol)s - "
"ignoring.", {'vol': volume.name})
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg = ('Failed to unlock volumes %(details)s' %
{'details': details})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
updated_volumes.append(
{'id': volume.id,
'replication_status': fields.ReplicationStatus.DISABLED})
else:
# For generic groups we replicate all the volumes
updated_volumes = []
for volume in volumes:
repl.VolumeReplication(self).delete_replication(
volume.name, replication_info)
# update status
for volume in volumes:
try:
self.ibm_storage_cli.cmd.vol_unlock(vol=volume.name)
self.ibm_storage_remote_cli.cmd.vol_unlock(
vol=volume.name)
except errors.XCLIError as e:
details = self._get_code_and_status_or_message(e)
msg = (_('Failed to unlock volumes %(details)s')
% {'details': details})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
updated_volumes.append(
{'id': volume['id'],
'replication_status': fields.ReplicationStatus.DISABLED})
return ({'replication_status': fields.ReplicationStatus.DISABLED},
updated_volumes)
def get_secondary_backend_id(self, secondary_backend_id):
if secondary_backend_id is None:
secondary_backend_id = self._get_target()
if secondary_backend_id is None:
msg = _("No targets defined. Can't perform failover.")
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
return secondary_backend_id
def check_for_splitbrain(self, volumes, pool_master, pool_slave):
if volumes:
# check for split brain situations
# check for files that are available on both volumes
# and are not in an active mirroring relation
split_brain = self._potential_split_brain(
self.ibm_storage_cli,
self.ibm_storage_remote_cli,
volumes, pool_master,
pool_slave)
if split_brain:
# if such a situation exists stop and raise an exception!
msg = (_("A potential split brain condition has been found "
"with the following volumes: \n'%(volumes)s.'") %
{'volumes': split_brain})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
def failover_replication(self, context, group, volumes,
secondary_backend_id):
"""Failover a cg with all it's volumes.
if secondery_id is default, cg needs to be failed back.
"""
volumes_updated = []
goal_status = ''
pool_master = None
group_updated = {'replication_status': group.replication_status}
LOG.info("failover_replication: of cg %(cg)s "
"from %(active)s to %(id)s",
{'cg': group.get('name'),
'active': self.active_backend_id,
'id': secondary_backend_id})
if secondary_backend_id == strings.PRIMARY_BACKEND_ID:
# default as active backend id
if self._using_default_backend():
LOG.info("CG has been failed back. "
"No need to fail back again.")
return group_updated, volumes_updated
# get the master pool, not using default id.
pool_master = self._get_target_params(
self.active_backend_id)['san_clustername']
pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']]
goal_status = 'enabled'
vol_goal_status = 'available'
else:
if not self._using_default_backend():
LOG.info("cg already failed over.")
return group_updated, volumes_updated
# using same api as Cheesecake, we need
# replication_device entry, so we use get_targets.
secondary_backend_id = self.get_secondary_backend_id(
secondary_backend_id)
pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']]
pool_slave = self._get_target_params(
secondary_backend_id)['san_clustername']
goal_status = fields.ReplicationStatus.FAILED_OVER
vol_goal_status = fields.ReplicationStatus.FAILED_OVER
# we should have secondary_backend_id by here.
self.ibm_storage_remote_cli = self._init_xcli(secondary_backend_id)
# check for split brain in mirrored volumes
self.check_for_splitbrain(volumes, pool_master, pool_slave)
group_specs = group_types.get_group_type_specs(group.group_type_id)
if group_specs is None:
msg = "No group specs found. Cannot failover."
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
failback = (secondary_backend_id == strings.PRIMARY_BACKEND_ID)
result = False
details = ""
if utils.is_group_a_cg_snapshot_type(group):
result, details = repl.GroupReplication(self).failover(group,
failback)
else:
replicated_vols = []
for volume in volumes:
result, details = repl.VolumeReplication(self).failover(
volume, failback)
if not result:
break
replicated_vols.append(volume)
# switch the replicated ones back in case of error
if not result:
for volume in replicated_vols:
result, details = repl.VolumeReplication(self).failover(
volume, not failback)
if result:
status = goal_status
group_updated['replication_status'] = status
else:
status = 'error'
updates = {'status': vol_goal_status}
if status == 'error':
group_updated['replication_extended_status'] = details
# if replication on cg was successful, then all of the volumes
# have been successfully replicated as well.
for volume in volumes:
volumes_updated.append({
'id': volume.id,
'updates': updates
})
# replace between active and secondary xcli
self._replace_xcli_to_remote_xcli()
self.active_backend_id = secondary_backend_id
return group_updated, volumes_updated
def _replace_xcli_to_remote_xcli(self):
temp_ibm_storage_cli = self.ibm_storage_cli
self.ibm_storage_cli = self.ibm_storage_remote_cli
self.ibm_storage_remote_cli = temp_ibm_storage_cli
def _get_replication_target_params(self):
LOG.debug('_get_replication_target_params.')
if not self.targets:
msg = _("No targets available for replication")
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
no_of_targets = len(self.targets)
if no_of_targets > 1:
msg = _("Too many targets configured. Only one is supported")
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
LOG.debug('_get_replication_target_params selecting target...')
target = self._get_target()
if not target:
msg = _("No targets available for replication.")
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
params = self._get_target_params(target)
if not params:
msg = (_("Missing target information for target '%(target)s'"),
{'target': target})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
return target, params
def _delete_volume(self, vol_name):
"""Deletes a volume on the Storage."""
LOG.debug("_delete_volume: %(volume)s",
{'volume': vol_name})
try:
self._call_xiv_xcli("vol_delete", vol=vol_name)
except errors.VolumeBadNameError:
# Don't throw error here, allow the cinder volume manager
# to set the volume as deleted if it's not available
# on the XIV box
LOG.info("Volume '%(volume)s' not found on storage",
{'volume': vol_name})
def _silent_delete_volume(self, volume):
"""Silently delete a volume.
silently delete a volume in case of an immediate failure
within a function that created it.
"""
try:
self._delete_volume(vol_name=volume['name'])
except errors.XCLIError as e:
error = self._get_code_and_status_or_message(e)
LOG.error(DELETE_VOLUME_BASE_ERROR,
{'volume': volume['name'], 'error': error})
def _silent_delete_volume_from_cg(self, volume, cgname):
"""Silently delete a volume from CG.
silently delete a volume in case of an immediate failure
within a function that created it.
"""
try:
self._call_xiv_xcli(
"cg_remove_vol", vol=volume['name'])
except errors.XCLIError as e:
LOG.error("Failed removing volume %(vol)s from "
"consistency group %(cg)s: %(err)s",
{'vol': volume['name'],
'cg': cgname,
'err': self._get_code_and_status_or_message(e)})
self._silent_delete_volume(volume=volume)
@proxy._trace_time
def delete_volume(self, volume):
"""Deletes a volume on the Storage machine."""
LOG.debug("delete_volume: %(volume)s",
{'volume': volume['name']})
# read replication information
specs = self._get_extra_specs(volume.get('volume_type_id', None))
replication_info = self._get_replication_info(specs)
if replication_info['enabled']:
try:
repl.VolumeReplication(self).delete_replication(
volume.name, replication_info)
except Exception as e:
error = self._get_code_and_status_or_message(e)
LOG.error(DELETE_VOLUME_BASE_ERROR,
{'volume': volume['name'], 'error': error})
# continue even if failed
# attempt to delete volume at target
target = None
try:
target, params = self._get_replication_target_params()
LOG.info('Target %(target)s: %(params)s',
{'target': target, 'params': params})
except Exception as e:
LOG.error("Unable to delete replicated volume "
"'%(volume)s': %(error)s.",
{'error': self._get_code_and_status_or_message(e),
'volume': volume['name']})
if target:
try:
self._call_remote_xiv_xcli(
"vol_delete", vol=volume['name'])
except errors.XCLIError as e:
LOG.error(
"Unable to delete replicated volume "
"'%(volume)s': %(error)s.",
{'error': self._get_code_and_status_or_message(e),
'volume': volume['name']})
try:
self._delete_volume(volume['name'])
except errors.XCLIError as e:
LOG.error(DELETE_VOLUME_BASE_ERROR,
{'volume': volume['name'],
'error': self._get_code_and_status_or_message(e)})
@proxy._trace_time
def initialize_connection(self, volume, connector):
"""Initialize connection to instance.
Maps the created volume to the nova volume node,
and returns the iSCSI target to be used in the instance
"""
connection_type = self._get_connection_type()
LOG.debug("initialize_connection: %(volume)s %(connector)s"
" connection_type: %(connection_type)s",
{'volume': volume['name'], 'connector': connector,
'connection_type': connection_type})
# This call does all the work..
fc_targets, host = self._get_host_and_fc_targets(
volume, connector)
lun_id = self._vol_map_and_get_lun_id(
volume, connector, host)
meta = {
'driver_volume_type': connection_type,
'data': {
'target_discovered': True,
'target_lun': lun_id,
'volume_id': volume['id'],
},
}
if connection_type == storage.XIV_CONNECTION_TYPE_ISCSI:
meta['data']['target_portal'] = self.meta['ibm_storage_portal']
meta['data']['target_iqn'] = self.meta['ibm_storage_iqn']
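            # provider_location is '<portal>,1 <target IQN> <LUN id>'; the
            # '1' is assumed to be the target portal group tag.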
meta['data']['provider_location'] = "%s,1 %s %s" % (
self.meta['ibm_storage_portal'],
self.meta['ibm_storage_iqn'], lun_id)
chap_type = self._get_chap_type()
LOG.debug("initialize_connection: %(volume)s."
" chap_type:%(chap_type)s",
{'volume': volume['name'],
'chap_type': chap_type})
if chap_type == storage.CHAP_ENABLED:
chap = self._create_chap(host)
meta['data']['auth_method'] = 'CHAP'
meta['data']['auth_username'] = chap[0]
meta['data']['auth_password'] = chap[1]
else:
all_storage_wwpns = self._get_fc_targets(None)
meta['data']['all_storage_wwpns'] = all_storage_wwpns
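            # Each target WWPN is assumed to encode the serving XIV module
            # in its next-to-last character; the recommended connection
            # count scales with the number of distinct modules.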
modules = set()
for wwpn in fc_targets:
modules.add(wwpn[-2])
meta['data']['recommended_connections'] = (
len(modules) * CONNECTIONS_PER_MODULE)
meta['data']['target_wwn'] = fc_targets
if fc_targets == []:
fc_targets = all_storage_wwpns
meta['data']['initiator_target_map'] = (
self._build_initiator_target_map(fc_targets, connector))
LOG.debug(six.text_type(meta))
return meta
@proxy._trace_time
def terminate_connection(self, volume, connector):
"""Terminate connection.
Unmaps volume. If this is the last connection from the host, undefines
the host from the storage.
"""
LOG.debug("terminate_connection: %(volume)s %(connector)s",
{'volume': volume['name'], 'connector': connector})
host = self._get_host(connector)
if host is None:
LOG.error(TERMINATE_CONNECTION_BASE_ERROR,
{'volume': volume['name'],
'error': "Host not found."})
return
fc_targets = {}
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_FC:
fc_targets = self._get_fc_targets(host)
try:
self._call_xiv_xcli(
"unmap_vol",
vol=volume['name'],
host=host.get('name'))
except errors.VolumeBadNameError:
LOG.error(TERMINATE_CONNECTION_BASE_ERROR,
{'volume': volume['name'],
'error': "Volume not found."})
except errors.XCLIError as err:
details = self._get_code_and_status_or_message(err)
LOG.error(TERMINATE_CONNECTION_BASE_ERROR,
{'volume': volume['name'],
'error': details})
# check if there are still mapped volumes or we can
# remove this host
host_mappings = []
try:
host_mappings = self._call_xiv_xcli(
"mapping_list",
host=host.get('name')).as_list
if len(host_mappings) == 0:
LOG.info("Terminate connection for volume '%(volume)s': "
"%(host)s %(info)s.",
{'volume': volume['name'],
'host': host.get('name'),
'info': "will be deleted"})
if not self._is_iscsi():
# The following meta data is provided so that zoning can
# be cleared
meta = {
'driver_volume_type': self._get_connection_type(),
'data': {'volume_id': volume['id'], },
}
meta['data']['target_wwn'] = fc_targets
meta['data']['initiator_target_map'] = (
self._build_initiator_target_map(fc_targets,
connector))
self._call_xiv_xcli("host_delete", host=host.get('name'))
if not self._is_iscsi():
return meta
return None
else:
LOG.debug(("Host '%(host)s' has additional mapped "
"volumes %(mappings)s"),
{'host': host.get('name'),
'mappings': host_mappings})
except errors.HostBadNameError:
LOG.error(TERMINATE_CONNECTION_HOST_ERROR,
{'volume': volume['name'],
'host': host.get('name'),
'error': "Host not found."})
except errors.XCLIError as err:
details = self._get_code_and_status_or_message(err)
LOG.error(TERMINATE_CONNECTION_HOST_ERROR,
{'volume': volume['name'],
'host': host.get('name'),
'error': details})
def _create_volume_from_snapshot(self, volume,
snapshot_name, snapshot_size):
"""Create volume from snapshot internal implementation.
used for regular snapshot and cgsnapshot
"""
LOG.debug("_create_volume_from_snapshot: %(volume)s from %(name)s",
{'volume': volume['name'], 'name': snapshot_name})
# TODO(alonma): Refactor common validation
volume_size = float(volume['size'])
if volume_size < snapshot_size:
error = (_("Volume size (%(vol_size)sGB) cannot be smaller than "
"the snapshot size (%(snap_size)sGB)..")
% {'vol_size': volume_size,
'snap_size': snapshot_size})
LOG.error(error)
raise self._get_exception()(error)
self.create_volume(volume)
try:
self._call_xiv_xcli(
"vol_copy", vol_src=snapshot_name, vol_trg=volume['name'])
except errors.XCLIError as e:
error = (_("Fatal error in copying volume: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
self._silent_delete_volume(volume)
raise self._get_exception()(error)
# A side effect of vol_copy is the resizing of the destination volume
# to the size of the source volume. If the size is different we need
# to get it back to the desired size
if snapshot_size == volume_size:
return
size = storage.gigabytes_to_blocks(volume_size)
try:
self._call_xiv_xcli(
"vol_resize", vol=volume['name'], size_blocks=size)
except errors.XCLIError as e:
error = (_("Fatal error in resize volume: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
self._silent_delete_volume(volume)
raise self._get_exception()(error)
@proxy._trace_time
def create_volume_from_snapshot(self, volume, snapshot):
"""create volume from snapshot."""
snapshot_size = float(snapshot['volume_size'])
self._create_volume_from_snapshot(volume, snapshot.name, snapshot_size)
@proxy._trace_time
def create_snapshot(self, snapshot):
"""create snapshot."""
try:
self._call_xiv_xcli(
"snapshot_create", vol=snapshot['volume_name'],
name=snapshot['name'])
except errors.XCLIError as e:
error = (_("Fatal error in snapshot_create: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
@proxy._trace_time
def delete_snapshot(self, snapshot):
"""delete snapshot."""
try:
self._call_xiv_xcli(
"snapshot_delete", snapshot=snapshot['name'])
except errors.XCLIError as e:
error = (_("Fatal error in snapshot_delete: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
@proxy._trace_time
def extend_volume(self, volume, new_size):
"""Resize volume."""
volume_size = float(volume['size'])
wanted_size = float(new_size)
if wanted_size == volume_size:
return
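        # Ask the storage to shrink only when the new size is smaller;
        # vol_resize takes an explicit shrink_volume flag.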
shrink = 'yes' if wanted_size < volume_size else 'no'
size = storage.gigabytes_to_blocks(wanted_size)
try:
self._call_xiv_xcli(
"vol_resize", vol=volume['name'],
size_blocks=size, shrink_volume=shrink)
except errors.XCLIError as e:
error = (_("Fatal error in vol_resize: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
@proxy._trace_time
def migrate_volume(self, context, volume, host):
"""Migrate volume to another backend.
Optimize the migration if the destination is on the same server.
If the specified host is another back-end on the same server, and
the volume is not attached, we can do the migration locally without
going through iSCSI.
Storage-assisted migration...
"""
false_ret = (False, None)
if 'location_info' not in host['capabilities']:
return false_ret
info = host['capabilities']['location_info']
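        # location_info is published by _update_stats in the form
        # '<backend prefix>:<storage hostname>:<pool name>'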
try:
dest, dest_host, dest_pool = info.split(':')
except ValueError:
return false_ret
volume_host = volume.host.split('_')[1]
if dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host:
return false_ret
if volume.attach_status == 'attached':
LOG.info("Storage-assisted volume migration: Volume "
"%(volume)s is attached",
{'volume': volume.id})
try:
self._call_xiv_xcli(
"vol_move", vol=volume.name,
pool=dest_pool)
except errors.XCLIError as e:
error = (_("Fatal error in vol_move: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
return (True, None)
@proxy._trace_time
def manage_volume(self, volume, reference):
"""Brings an existing backend storage object under Cinder management.
reference value is passed straight from the get_volume_list helper
function. it is up to the driver how this should be interpreted.
It should be sufficient to identify a storage object that the driver
should somehow associate with the newly-created cinder volume
structure.
There are two ways to do this:
1. Rename the backend storage object so that it matches the,
volume['name'] which is how drivers traditionally map between a
cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the reference doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
"""
existing_volume = reference['source-name']
LOG.debug("manage_volume: %(volume)s", {'volume': existing_volume})
# check that volume exists
try:
volumes = self._call_xiv_xcli(
"vol_list", vol=existing_volume).as_list
except errors.XCLIError as e:
error = (MANAGE_VOLUME_BASE_ERROR
% {'volume': existing_volume,
'error': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
if len(volumes) != 1:
error = (MANAGE_VOLUME_BASE_ERROR
% {'volume': existing_volume,
'error': 'Volume does not exist'})
LOG.error(error)
raise self._get_exception()(error)
volume['size'] = float(volumes[0]['size'])
# option 1:
# rename volume to volume['name']
try:
self._call_xiv_xcli(
"vol_rename",
vol=existing_volume,
new_name=volume['name'])
except errors.XCLIError as e:
error = (MANAGE_VOLUME_BASE_ERROR
% {'volume': existing_volume,
'error': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
# option 2:
# return volume name as admin metadata
# update the admin metadata DB
        # Need to do roughly the same in volume creation: use the metadata
        # instead of the volume name
return {}
@proxy._trace_time
def manage_volume_get_size(self, volume, reference):
"""Return size of volume to be managed by manage_volume.
When calculating the size, round up to the next GB.
"""
existing_volume = reference['source-name']
# check that volume exists
try:
volumes = self._call_xiv_xcli(
"vol_list", vol=existing_volume).as_list
except errors.XCLIError as e:
error = (_("Fatal error in vol_list: %(details)s")
% {'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
if len(volumes) != 1:
error = (_("Volume %(volume)s is not available on storage") %
{'volume': existing_volume})
LOG.error(error)
raise self._get_exception()(error)
return float(volumes[0]['size'])
@proxy._trace_time
def unmanage_volume(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
"""
pass
@proxy._trace_time
def get_replication_status(self, context, volume):
"""Return replication status."""
pass
def freeze_backend(self, context):
"""Notify the backend that it's frozen."""
# go over volumes in backend that are replicated and lock them
pass
def thaw_backend(self, context):
"""Notify the backend that it's unfrozen/thawed."""
# go over volumes in backend that are replicated and unlock them
pass
def _using_default_backend(self):
return ((self.active_backend_id is None) or
(self.active_backend_id == strings.PRIMARY_BACKEND_ID))
def _is_vol_split_brain(self, xcli_master, xcli_slave, vol):
mirror_master = xcli_master.cmd.mirror_list(vol=vol).as_list
mirror_slave = xcli_slave.cmd.mirror_list(vol=vol).as_list
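        # The pair is healthy only when each side reports exactly one
        # mirror, the roles are the expected Master/Slave, and the master
        # reports a synchronized state; anything else is treated as a
        # potential split brain.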
if (len(mirror_master) == 1 and len(mirror_slave) == 1 and
mirror_master[0].current_role == 'Master' and
mirror_slave[0].current_role == 'Slave' and
mirror_master[0].sync_state.lower() in SYNCHED_STATES):
return False
else:
return True
def _potential_split_brain(self, xcli_master, xcli_slave,
volumes, pool_master, pool_slave):
potential_split_brain = []
if xcli_master is None or xcli_slave is None:
return potential_split_brain
try:
vols_master = xcli_master.cmd.vol_list(
pool=pool_master).as_dict('name')
except Exception:
msg = "Failed getting information from the active storage."
LOG.debug(msg)
return potential_split_brain
try:
vols_slave = xcli_slave.cmd.vol_list(
pool=pool_slave).as_dict('name')
except Exception:
msg = "Failed getting information from the target storage."
LOG.debug(msg)
return potential_split_brain
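        # Only volumes that exist on both storages *and* were requested
        # for failover can be in a split brain state.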
vols_requested = set(vol['name'] for vol in volumes)
common_vols = set(vols_master).intersection(
set(vols_slave)).intersection(set(vols_requested))
for name in common_vols:
if self._is_vol_split_brain(xcli_master=xcli_master,
xcli_slave=xcli_slave, vol=name):
potential_split_brain.append(name)
return potential_split_brain
@proxy._trace_time
def failover_host(self, context, volumes, secondary_id, groups=None):
"""Failover a full backend.
        Fails volumes over or back: if secondary_id is 'default',
        volumes will be failed back, otherwise they are failed over.
Note that the resulting status depends on the direction:
in case of failover it will be 'failed-over' and in case of
failback it will be 'available'
"""
volume_update_list = []
LOG.info("failover_host: from %(active)s to %(id)s",
{'active': self.active_backend_id, 'id': secondary_id})
# special cases to handle
if secondary_id == strings.PRIMARY_BACKEND_ID:
# case: already failed back
if self._using_default_backend():
LOG.info("Host has been failed back. No need "
"to fail back again.")
return self.active_backend_id, volume_update_list, []
pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']]
pool_master = self._get_target_params(
self.active_backend_id)['san_clustername']
goal_status = 'available'
else:
if not self._using_default_backend():
LOG.info("Already failed over. No need to failover again.")
return self.active_backend_id, volume_update_list, []
# case: need to select a target
secondary_id = self.get_secondary_backend_id(secondary_id)
pool_master = self.storage_info[storage.FLAG_KEYS['storage_pool']]
try:
pool_slave = self._get_target_params(
secondary_id)['san_clustername']
except Exception:
msg = _("Invalid target information. Can't perform failover")
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
goal_status = fields.ReplicationStatus.FAILED_OVER
        # connect xcli to secondary storage according to backend_id by
# calling _init_xcli with secondary_id
self.ibm_storage_remote_cli = self._init_xcli(secondary_id)
# get replication_info for all volumes at once
if len(volumes):
# check for split brain situations
# check for files that are available on both volumes
# and are not in an active mirroring relation
self.check_for_splitbrain(volumes, pool_master, pool_slave)
# loop over volumes and attempt failover
for volume in volumes:
LOG.debug("Attempting to failover '%(vol)s'",
{'vol': volume['name']})
result, details = repl.VolumeReplication(self).failover(
volume, failback=(secondary_id == strings.PRIMARY_BACKEND_ID))
if result:
status = goal_status
else:
status = 'error'
updates = {'status': status}
if status == 'error':
updates['replication_extended_status'] = details
volume_update_list.append({
'volume_id': volume['id'],
'updates': updates
})
# set active xcli to secondary xcli
self._replace_xcli_to_remote_xcli()
# set active backend id to secondary id
self.active_backend_id = secondary_id
return secondary_id, volume_update_list, []
@proxy._trace_time
def retype(self, ctxt, volume, new_type, diff, host):
"""Change volume type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities
"""
LOG.debug("retype: volume = %(vol)s type = %(ntype)s",
{'vol': volume.get('display_name'),
'ntype': new_type['name']})
if 'location_info' not in host['capabilities']:
return False
info = host['capabilities']['location_info']
try:
(dest, dest_host, dest_pool) = info.split(':')
except ValueError:
return False
volume_host = volume.get('host').split('_')[1]
if (dest != strings.XIV_BACKEND_PREFIX or dest_host != volume_host):
return False
pool_name = self._get_backend_pool()
# if pool is different. else - we're on the same pool and retype is ok.
if (pool_name != dest_pool):
# The input host and pool are already "linked" to the new_type,
# otherwise the scheduler does not assign them as candidates for
# the retype thus we just need to migrate the volume to the new
# pool
LOG.debug("retype: migrate volume %(vol)s to "
"host=%(host)s, pool=%(pool)s",
{'vol': volume.get('display_name'),
'host': dest_host, 'pool': dest_pool})
(mig_result, model) = self.migrate_volume(
context=ctxt, volume=volume, host=host)
if not mig_result:
raise self.meta['exception'].VolumeBackendAPIException(
data=PERF_CLASS_ADD_ERROR)
# Migration occurred, retype has finished.
# We need to check for type and QoS.
# getting the old specs
old_specs = self._qos_specs_from_volume(volume)
new_specs = self._get_qos_specs(new_type.get('id', None))
if not new_specs:
if old_specs:
LOG.debug("qos: removing qos class for %(vol)s.",
{'vol': volume.display_name})
self._qos_remove_vol(volume)
return True
perf_class_name_old = self._check_perf_class_on_backend(old_specs)
perf_class_name_new = self._check_perf_class_on_backend(new_specs)
if perf_class_name_new != perf_class_name_old:
# add new qos to vol. (removed from old qos automatically)
self._qos_add_vol(volume, perf_class_name_new)
return True
@proxy._trace_time
def _check_storage_version_for_qos_support(self):
if self.meta['storage_version'] is None:
self.meta['storage_version'] = self._call_xiv_xcli(
"version_get").as_single_element.system_version
if int(self.meta['storage_version'][0:2]) >= 12:
return 'True'
return 'False'
@proxy._trace_time
    def _update_stats(self):
"""fetch and update stats."""
LOG.debug("Entered XIVProxy::_update_stats:")
self.meta['stat'] = {}
connection_type = self._get_connection_type()
backend_name = None
if self.driver:
backend_name = self.driver.configuration.safe_get(
'volume_backend_name')
self.meta['stat']['reserved_percentage'] = (
self.driver.configuration.safe_get('reserved_percentage'))
self.meta['stat']["volume_backend_name"] = (
backend_name or '%s_%s_%s_%s' % (
strings.XIV_BACKEND_PREFIX,
self.storage_info[storage.FLAG_KEYS['address']],
self.storage_info[storage.FLAG_KEYS['storage_pool']],
connection_type))
self.meta['stat']["vendor_name"] = 'IBM'
self.meta['stat']["driver_version"] = self.full_version
self.meta['stat']["storage_protocol"] = connection_type
self.meta['stat']['multiattach'] = False
self.meta['stat']['group_replication_enabled'] = True
self.meta['stat']['consistent_group_replication_enabled'] = True
self.meta['stat']['QoS_support'] = (
self._check_storage_version_for_qos_support())
self.meta['stat']['location_info'] = (
('%(destination)s:%(hostname)s:%(pool)s' %
{'destination': strings.XIV_BACKEND_PREFIX,
'hostname': self.storage_info[storage.FLAG_KEYS['address']],
'pool': self.storage_info[storage.FLAG_KEYS['storage_pool']]
}))
self._retrieve_pool_stats(self.meta)
if self.targets:
self.meta['stat']['replication_enabled'] = True
self.meta['stat']['replication_type'] = [SYNC, ASYNC]
self.meta['stat']['rpo'] = repl.Replication.get_supported_rpo()
self.meta['stat']['replication_count'] = len(self.targets)
self.meta['stat']['replication_targets'] = [target for target in
self.targets]
self.meta['stat']['timestamp'] = datetime.datetime.utcnow()
LOG.debug("Exiting XIVProxy::_update_stats: %(stat)s",
{'stat': self.meta['stat']})
@proxy._trace_time
def _get_pool(self):
pool_name = self._get_backend_pool()
pools = self._call_xiv_xcli(
"pool_list", pool=pool_name).as_list
if not pools:
msg = (_(
"Pool %(pool)s not available on storage") %
{'pool': pool_name})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(data=msg)
return pools
def _get_backend_pool(self):
if self.active_backend_id == strings.PRIMARY_BACKEND_ID:
return self.storage_info[storage.FLAG_KEYS['storage_pool']]
else:
return self._get_target_params(
self.active_backend_id)['san_clustername']
def _retrieve_pool_stats(self, data):
try:
pools = self._get_pool()
pool = pools[0]
data['stat']['pool_name'] = pool.get('name')
# handle different fields in pool_list between Gen3 and BR
soft_size = pool.get('soft_size')
if soft_size is None:
soft_size = pool.get('size')
hard_size = 0
else:
hard_size = pool.hard_size
data['stat']['total_capacity_gb'] = int(soft_size)
data['stat']['free_capacity_gb'] = int(
pool.get('empty_space_soft', pool.get('empty_space')))
# thin/thick provision
data['stat']['thin_provisioning_support'] = (
'True' if soft_size > hard_size else 'False')
data['stat']['backend_state'] = 'up'
except Exception as e:
data['stat']['total_capacity_gb'] = 0
data['stat']['free_capacity_gb'] = 0
            data['stat']['thin_provisioning_support'] = 'False'
data['stat']['backend_state'] = 'down'
error = self._get_code_and_status_or_message(e)
LOG.error(error)
@proxy._trace_time
def create_cloned_volume(self, volume, src_vref):
"""Create cloned volume."""
# read replication information
specs = self._get_extra_specs(volume.get('volume_type_id', None))
replication_info = self._get_replication_info(specs)
# TODO(alonma): Refactor to use more common code
src_vref_size = float(src_vref['size'])
volume_size = float(volume['size'])
if volume_size < src_vref_size:
error = (_("New volume size (%(vol_size)s GB) cannot be less"
"than the source volume size (%(src_size)s GB)..")
% {'vol_size': volume_size, 'src_size': src_vref_size})
LOG.error(error)
raise self._get_exception()(error)
self._create_volume(volume)
try:
self._call_xiv_xcli(
"vol_copy",
vol_src=src_vref['name'],
vol_trg=volume['name'])
except errors.XCLIError as e:
error = (_("Failed to copy from '%(src)s' to '%(vol)s': "
"%(details)s")
% {'src': src_vref.get('name', ''),
'vol': volume.get('name', ''),
'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
self._silent_delete_volume(volume=volume)
raise self._get_exception()(error)
# A side effect of vol_copy is the resizing of the destination volume
# to the size of the source volume. If the size is different we need
# to get it back to the desired size
if src_vref_size != volume_size:
size = storage.gigabytes_to_blocks(volume_size)
try:
self._call_xiv_xcli(
"vol_resize",
vol=volume['name'],
size_blocks=size)
except errors.XCLIError as e:
error = (_("Fatal error in vol_resize: %(details)s")
% {'details':
self._get_code_and_status_or_message(e)})
LOG.error(error)
self._silent_delete_volume(volume=volume)
raise self._get_exception()(error)
self.handle_created_vol_properties(replication_info, volume)
@proxy._trace_time
def volume_exists(self, volume):
"""Checks if a volume exists on xiv."""
return len(self._call_xiv_xcli(
"vol_list", vol=volume['name']).as_list) > 0
def _cg_name_from_id(self, id):
'''Get storage CG name from id.
A utility method to translate from id
to CG name on the storage
'''
return "cg_%(id)s" % {'id': id}
def _group_name_from_id(self, id):
'''Get storage group name from id.
A utility method to translate from id
to Snapshot Group name on the storage
'''
return "cgs_%(id)s" % {'id': id}
def _cg_name_from_volume(self, volume):
'''Get storage CG name from volume.
A utility method to translate from openstack volume
to CG name on the storage
'''
LOG.debug("_cg_name_from_volume: %(vol)s",
{'vol': volume['name']})
cg_id = volume.get('group_id', None)
if cg_id:
cg_name = self._cg_name_from_id(cg_id)
LOG.debug("Volume %(vol)s is in CG %(cg)s",
{'vol': volume['name'], 'cg': cg_name})
return cg_name
else:
LOG.debug("Volume %(vol)s not in CG",
{'vol': volume['name']})
return None
def _cg_name_from_group(self, group):
'''Get storage CG name from group.
A utility method to translate from openstack group
to CG name on the storage
'''
return self._cg_name_from_id(group['id'])
def _cg_name_from_cgsnapshot(self, cgsnapshot):
'''Get storage CG name from snapshot.
A utility method to translate from openstack cgsnapshot
to CG name on the storage
'''
return self._cg_name_from_id(cgsnapshot['group_id'])
def _group_name_from_cgsnapshot_id(self, cgsnapshot_id):
        '''Get storage Snapshot Group name from snapshot.
A utility method to translate from openstack cgsnapshot
to Snapshot Group name on the storage
'''
return self._group_name_from_id(cgsnapshot_id)
def _volume_name_from_cg_snapshot(self, cgs, vol):
# Note: The string is limited by the storage to 63 characters
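        # e.g. ('cgs_<cgsnapshot id>', 'volume-<uuid>') ->
        #      'cgs_<cgsnapshot id>.volume-<uuid>', truncated to fit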
return ('%(cgs)s.%(vol)s' % {'cgs': cgs, 'vol': vol})[0:62]
@proxy._trace_time
def create_group(self, context, group):
"""Creates a group."""
if utils.is_group_a_cg_snapshot_type(group):
cgname = self._cg_name_from_group(group)
return self._create_consistencygroup(context, cgname)
# For generic group, create is executed by manager
raise NotImplementedError()
def _create_consistencygroup(self, context, cgname):
"""Creates a consistency group."""
LOG.info("Creating consistency group %(name)s.",
{'name': cgname})
# call XCLI
try:
self._call_xiv_xcli(
"cg_create", cg=cgname,
pool=self.storage_info[
storage.FLAG_KEYS['storage_pool']]).as_list
except errors.CgNameExistsError as e:
error = (_("consistency group %s already exists on backend") %
cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgLimitReachedError as e:
error = _("Reached Maximum number of consistency groups")
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
error = (_("Fatal error in cg_create: %(details)s") %
{'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
model_update = {'status': fields.GroupStatus.AVAILABLE}
return model_update
def _create_consistencygroup_on_remote(self, context, cgname):
"""Creates a consistency group on secondary machine.
Return group available even if it already exists (for replication)
"""
LOG.info("Creating consistency group %(name)s on secondary.",
{'name': cgname})
# call remote XCLI
try:
self._call_remote_xiv_xcli(
"cg_create", cg=cgname,
pool=self.storage_info[
storage.FLAG_KEYS['storage_pool']]).as_list
except errors.CgNameExistsError:
model_update = {'status': fields.GroupStatus.AVAILABLE}
except errors.CgLimitReachedError:
error = _("Maximum number of consistency groups reached")
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
error = (_("Fatal error in cg_create on remote: %(details)s") %
{'details': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
model_update = {'status': fields.GroupStatus.AVAILABLE}
return model_update
def _silent_cleanup_consistencygroup_from_src(self, context, group,
volumes, cgname):
"""Silent cleanup of volumes from CG.
Silently cleanup volumes and created consistency-group from
storage. This function is called after a failure already occurred
and just logs errors, but does not raise exceptions
"""
for volume in volumes:
self._silent_delete_volume_from_cg(volume=volume, cgname=cgname)
try:
self._delete_consistencygroup(context, group, [])
except Exception as e:
details = self._get_code_and_status_or_message(e)
LOG.error('Failed to cleanup CG %(details)s',
{'details': details})
@proxy._trace_time
def create_group_from_src(self, context, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols):
"""Create volume group from volume group or volume group snapshot."""
if utils.is_group_a_cg_snapshot_type(group):
return self._create_consistencygroup_from_src(context, group,
volumes,
group_snapshot,
sorted_snapshots,
source_group,
sorted_source_vols)
else:
raise NotImplementedError()
def _create_consistencygroup_from_src(self, context, group, volumes,
cgsnapshot, snapshots, source_cg,
sorted_source_vols):
"""Creates a consistency group from source.
Source can be a cgsnapshot with the relevant list of snapshots,
or another CG with its list of volumes.
"""
cgname = self._cg_name_from_group(group)
LOG.info("Creating consistency group %(cg)s from src.",
{'cg': cgname})
volumes_model_update = []
if cgsnapshot and snapshots:
LOG.debug("Creating from cgsnapshot %(cg)s",
{'cg': self._cg_name_from_group(cgsnapshot)})
try:
self._create_consistencygroup(context, cgname)
except Exception as e:
LOG.error(
"Creating CG from cgsnapshot failed: %(details)s",
{'details': self._get_code_and_status_or_message(e)})
raise
created_volumes = []
try:
groupname = self._group_name_from_cgsnapshot_id(
cgsnapshot['id'])
for volume, source in zip(volumes, snapshots):
vol_name = source.volume_name
LOG.debug("Original volume: %(vol_name)s",
{'vol_name': vol_name})
snapshot_name = self._volume_name_from_cg_snapshot(
groupname, vol_name)
LOG.debug("create volume (vol)s from snapshot %(snap)s",
{'vol': vol_name,
'snap': snapshot_name})
snapshot_size = float(source['volume_size'])
self._create_volume_from_snapshot(
volume, snapshot_name, snapshot_size)
created_volumes.append(volume)
volumes_model_update.append(
{
'id': volume['id'],
'status': 'available',
'size': snapshot_size,
})
except Exception as e:
details = self._get_code_and_status_or_message(e)
msg = (CREATE_VOLUME_BASE_ERROR % {'details': details})
LOG.error(msg)
# cleanup and then raise exception
self._silent_cleanup_consistencygroup_from_src(
context, group, created_volumes, cgname)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
elif source_cg and sorted_source_vols:
LOG.debug("Creating from CG %(cg)s .",
{'cg': self._cg_name_from_group(source_cg)})
LOG.debug("Creating from CG %(cg)s .", {'cg': source_cg['id']})
try:
                self._create_consistencygroup(context, cgname)
except Exception as e:
LOG.error("Creating CG from CG failed: %(details)s",
{'details': self._get_code_and_status_or_message(e)})
raise
created_volumes = []
try:
for volume, source in zip(volumes, sorted_source_vols):
self.create_cloned_volume(volume, source)
created_volumes.append(volume)
volumes_model_update.append(
{
'id': volume['id'],
'status': 'available',
'size': source['size'],
})
except Exception as e:
details = self._get_code_and_status_or_message(e)
                msg = (CREATE_VOLUME_BASE_ERROR % {'details': details})
LOG.error(msg)
# cleanup and then raise exception
self._silent_cleanup_consistencygroup_from_src(
context, group, created_volumes, cgname)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
else:
error = 'create_consistencygroup_from_src called without a source'
raise self._get_exception()(error)
model_update = {'status': fields.GroupStatus.AVAILABLE}
return model_update, volumes_model_update
@proxy._trace_time
def delete_group(self, context, group, volumes):
"""Deletes a group."""
rep_status = group.get('replication_status')
enabled = fields.ReplicationStatus.ENABLED
failed_over = fields.ReplicationStatus.FAILED_OVER
if rep_status == enabled or rep_status == failed_over:
msg = _("Disable group replication before deleting group.")
LOG.error(msg)
raise self._get_exception()(msg)
if utils.is_group_a_cg_snapshot_type(group):
return self._delete_consistencygroup(context, group, volumes)
else:
# For generic group delete the volumes only - executed by manager
raise NotImplementedError()
def _delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
cgname = self._cg_name_from_group(group)
LOG.info("Deleting consistency group %(name)s.",
{'name': cgname})
model_update = {}
model_update['status'] = group.get('status',
fields.GroupStatus.DELETING)
# clean up volumes
volumes_model_update = []
for volume in volumes:
try:
self._call_xiv_xcli(
"cg_remove_vol", vol=volume['name'])
except errors.XCLIError as e:
LOG.error("Failed removing volume %(vol)s from "
"consistency group %(cg)s: %(err)s",
{'vol': volume['name'],
'cg': cgname,
'err': self._get_code_and_status_or_message(e)})
# continue in spite of error
try:
self._delete_volume(volume['name'])
# size and volume_type_id are required in liberty code
                # they are maintained here for backwards compatibility
volumes_model_update.append(
{
'id': volume['id'],
'status': 'deleted',
})
except errors.XCLIError as e:
LOG.error(DELETE_VOLUME_BASE_ERROR,
{'volume': volume['name'],
'error': self._get_code_and_status_or_message(e)})
model_update['status'] = fields.GroupStatus.ERROR_DELETING
# size and volume_type_id are required in liberty code
# they are maintained here for backwards compatibility
volumes_model_update.append(
{
'id': volume['id'],
'status': 'error_deleting',
})
# delete CG from cinder.volume.drivers.ibm.ibm_storage
if model_update['status'] != fields.GroupStatus.ERROR_DELETING:
try:
self._call_xiv_xcli(
"cg_delete", cg=cgname).as_list
model_update['status'] = fields.GroupStatus.DELETED
except (errors.CgDoesNotExistError, errors.CgBadNameError):
LOG.warning("consistency group %(cgname)s does not "
"exist on backend",
{'cgname': cgname})
# if the object was already deleted on the backend, we can
# continue and delete the openstack object
model_update['status'] = fields.GroupStatus.DELETED
except errors.CgHasMirrorError:
error = (_("consistency group %s is being mirrored") % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgNotEmptyError:
error = (_("consistency group %s is not empty") % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
error = (_("Fatal: %(code)s. CG: %(cgname)s") %
{'code': self._get_code_and_status_or_message(e),
'cgname': cgname})
LOG.error(error)
raise self._get_exception()(error)
return model_update, volumes_model_update
@proxy._trace_time
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
if utils.is_group_a_cg_snapshot_type(group):
return self._update_consistencygroup(context, group, add_volumes,
remove_volumes)
else:
# For generic group update executed by manager
raise NotImplementedError()
def _update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group."""
cgname = self._cg_name_from_group(group)
LOG.info("Updating consistency group %(name)s.", {'name': cgname})
model_update = {'status': fields.GroupStatus.AVAILABLE}
add_volumes_update = []
if add_volumes:
for volume in add_volumes:
try:
self._call_xiv_xcli(
"cg_add_vol", vol=volume['name'], cg=cgname)
except errors.XCLIError as e:
error = (_("Failed adding volume %(vol)s to "
"consistency group %(cg)s: %(err)s")
% {'vol': volume['name'],
'cg': cgname,
'err':
self._get_code_and_status_or_message(e)})
LOG.error(error)
self._cleanup_consistencygroup_update(
context, group, add_volumes_update, None)
raise self._get_exception()(error)
add_volumes_update.append({'name': volume['name']})
remove_volumes_update = []
if remove_volumes:
for volume in remove_volumes:
try:
self._call_xiv_xcli(
"cg_remove_vol", vol=volume['name'])
except (errors.VolumeNotInConsGroup,
errors.VolumeBadNameError) as e:
# ignore the error if the volume exists in storage but
# not in cg, or the volume does not exist in the storage
details = self._get_code_and_status_or_message(e)
LOG.debug(details)
except errors.XCLIError as e:
error = (_("Failed removing volume %(vol)s from "
"consistency group %(cg)s: %(err)s")
% {'vol': volume['name'],
'cg': cgname,
'err':
self._get_code_and_status_or_message(e)})
LOG.error(error)
self._cleanup_consistencygroup_update(
context, group, add_volumes_update,
remove_volumes_update)
raise self._get_exception()(error)
remove_volumes_update.append({'name': volume['name']})
return model_update, None, None
def _cleanup_consistencygroup_update(self, context, group,
add_volumes, remove_volumes):
if add_volumes:
for volume in add_volumes:
try:
self._call_xiv_xcli(
"cg_remove_vol", vol=volume['name'])
except Exception:
LOG.debug("cg_remove_vol(%s) failed", volume['name'])
if remove_volumes:
cgname = self._cg_name_from_group(group)
for volume in remove_volumes:
try:
self._call_xiv_xcli(
"cg_add_vol", vol=volume['name'], cg=cgname)
except Exception:
LOG.debug("cg_add_vol(%(name)s, %(cgname)s) failed",
{'name': volume['name'], 'cgname': cgname})
@proxy._trace_time
def create_group_snapshot(self, context, group_snapshot, snapshots):
"""Create volume group snapshot."""
if utils.is_group_a_cg_snapshot_type(group_snapshot):
return self._create_cgsnapshot(context, group_snapshot, snapshots)
else:
# For generic group snapshot create executed by manager
raise NotImplementedError()
def _create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a CG snapshot."""
model_update = {'status': fields.GroupSnapshotStatus.AVAILABLE}
cgname = self._cg_name_from_cgsnapshot(cgsnapshot)
groupname = self._group_name_from_cgsnapshot_id(cgsnapshot['id'])
LOG.info("Creating snapshot %(group)s for CG %(cg)s.",
{'group': groupname, 'cg': cgname})
# call XCLI
try:
self._call_xiv_xcli(
"cg_snapshots_create", cg=cgname,
snap_group=groupname).as_list
except errors.CgDoesNotExistError as e:
error = (_("Consistency group %s does not exist on backend") %
cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.CgBadNameError as e:
error = (_("Consistency group %s has an illegal name") % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.SnapshotGroupDoesNotExistError as e:
error = (_("Snapshot group %s has an illegal name") % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.PoolSnapshotLimitReachedError as e:
error = _("Reached maximum snapshots allocation size")
LOG.error(error)
raise self._get_exception()(error)
except errors.CgEmptyError as e:
error = (_("Consistency group %s is empty") % cgname)
LOG.error(error)
raise self._get_exception()(error)
except (errors.MaxVolumesReachedError,
errors.DomainMaxVolumesReachedError) as e:
error = _("Reached Maximum number of volumes")
LOG.error(error)
raise self._get_exception()(error)
except errors.SnapshotGroupIsReservedError as e:
error = (_("Consistency group %s name is reserved") % cgname)
LOG.error(error)
raise self._get_exception()(error)
except errors.SnapshotGroupAlreadyExistsError as e:
error = (_("Snapshot group %s already exists") % groupname)
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
error = (_("Fatal: CG %(cg)s, Group %(group)s. %(err)s") %
{'cg': cgname,
'group': groupname,
'err': self._get_code_and_status_or_message(e)})
LOG.error(error)
raise self._get_exception()(error)
snapshots_model_update = []
for snapshot in snapshots:
snapshots_model_update.append(
{
'id': snapshot['id'],
'status': fields.SnapshotStatus.AVAILABLE,
})
return model_update, snapshots_model_update
@proxy._trace_time
def delete_group_snapshot(self, context, group_snapshot, snapshots):
"""Delete volume group snapshot."""
if utils.is_group_a_cg_snapshot_type(group_snapshot):
return self._delete_cgsnapshot(context, group_snapshot, snapshots)
else:
# For generic group snapshot delete is executed by manager
raise NotImplementedError()
def _delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a CG snapshot."""
cgname = self._cg_name_from_cgsnapshot(cgsnapshot)
groupname = self._group_name_from_cgsnapshot_id(cgsnapshot['id'])
LOG.info("Deleting snapshot %(group)s for CG %(cg)s.",
{'group': groupname, 'cg': cgname})
# call XCLI
try:
self._call_xiv_xcli(
"snap_group_delete", snap_group=groupname).as_list
except errors.CgDoesNotExistError:
error = _("consistency group %s not found on backend") % cgname
LOG.error(error)
raise self._get_exception()(error)
except errors.PoolSnapshotLimitReachedError:
error = _("Reached Maximum size allocated for snapshots")
LOG.error(error)
raise self._get_exception()(error)
except errors.CgEmptyError:
error = _("Consistency group %s is empty") % cgname
LOG.error(error)
raise self._get_exception()(error)
except errors.XCLIError as e:
error = _("Fatal: CG %(cg)s, Group %(group)s. %(err)s") % {
'cg': cgname,
'group': groupname,
'err': self._get_code_and_status_or_message(e)
}
LOG.error(error)
raise self._get_exception()(error)
model_update = {'status': fields.GroupSnapshotStatus.DELETED}
snapshots_model_update = []
for snapshot in snapshots:
snapshots_model_update.append(
{
'id': snapshot['id'],
'status': fields.SnapshotStatus.DELETED,
})
return model_update, snapshots_model_update
def _generate_chap_secret(self, chap_name):
"""Returns chap secret generated according to chap_name
chap secret must be between 12-16 chaqnracters
"""
name = chap_name
chap_secret = ""
while len(chap_secret) < 12:
chap_secret = cryptish.encrypt(name)[:16]
name = name + '_'
LOG.debug("_generate_chap_secret: %(secret)s",
{'secret': chap_secret})
return chap_secret
@proxy._trace_time
def _create_chap(self, host=None):
"""Get CHAP name and secret
returns chap name and secret
chap_name and chap_secret must be 12-16 characters long
"""
if host:
if host['chap']:
chap_name = host['chap'][0]
LOG.debug("_create_chap: %(chap_name)s ",
{'chap_name': chap_name})
else:
chap_name = host['name']
else:
LOG.info("_create_chap: host missing!!!")
chap_name = "12345678901234"
chap_secret = self._generate_chap_secret(chap_name)
LOG.debug("_create_chap (new): %(chap_name)s ",
{'chap_name': chap_name})
return (chap_name, chap_secret)
@proxy._trace_time
def _get_host(self, connector):
"""Returns a host looked up via initiator."""
try:
host_bunch = self._get_bunch_from_host(connector)
except Exception as e:
details = self._get_code_and_status_or_message(e)
msg = (_("%(prefix)s. Invalid connector: '%(details)s.'") %
{'prefix': storage.XIV_LOG_PREFIX, 'details': details})
raise self._get_exception()(msg)
host = []
chap = None
all_hosts = self._call_xiv_xcli("host_list").as_list
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI:
host = [host_obj for host_obj in all_hosts
if host_bunch['initiator']
in host_obj.iscsi_ports.split(',')]
else:
if 'wwpns' in connector:
if len(host_bunch['wwpns']) > 0:
wwpn_set = set([wwpn.lower() for wwpn
in host_bunch['wwpns']])
host = [host_obj for host_obj in all_hosts if
len(wwpn_set.intersection(host_obj.get(
'fc_ports', '').lower().split(','))) > 0]
else: # fake connector created by nova
host = [host_obj for host_obj in all_hosts
if host_obj.get('name', '') == connector['host']]
if len(host) == 1:
if self._is_iscsi() and host[0].iscsi_chap_name:
chap = (host[0].iscsi_chap_name,
self._generate_chap_secret(host[0].iscsi_chap_name))
LOG.debug("_get_host: chap_name %(chap_name)s ",
{'chap_name': host[0].iscsi_chap_name})
return self._get_bunch_from_host(
connector, host[0].id, host[0].name, chap)
LOG.debug("_get_host: returns None")
return None
@proxy._trace_time
def _call_host_define(self, host,
chap_name=None, chap_secret=None, domain_name=None):
"""Call host_define using XCLI."""
LOG.debug("host_define with domain: %s)", domain_name)
if domain_name:
if chap_name:
return self._call_xiv_xcli(
"host_define",
host=host,
iscsi_chap_name=chap_name,
iscsi_chap_secret=chap_secret,
domain=domain_name
).as_list[0]
else:
return self._call_xiv_xcli(
"host_define",
host=host,
domain=domain_name
).as_list[0]
else:
# No domain
if chap_name:
return self._call_xiv_xcli(
"host_define",
host=host,
iscsi_chap_name=chap_name,
iscsi_chap_secret=chap_secret
).as_list[0]
else:
return self._call_xiv_xcli(
"host_define",
host=host
).as_list[0]
@proxy._trace_time
def _define_host_according_to_chap(self, host, in_domain):
"""Check on chap state and define host accordingly."""
chap_name = None
chap_secret = None
if (self._get_connection_type() ==
storage.XIV_CONNECTION_TYPE_ISCSI and
self._get_chap_type() == storage.CHAP_ENABLED):
host_bunch = {'name': host, 'chap': None, }
chap = self._create_chap(host=host_bunch)
chap_name = chap[0]
chap_secret = chap[1]
LOG.debug("_define_host_according_to_chap: "
"%(name)s : %(secret)s",
{'name': chap_name, 'secret': chap_secret})
return self._call_host_define(
host=host,
chap_name=chap_name,
chap_secret=chap_secret,
domain_name=in_domain)
def _define_ports(self, host_bunch):
"""Defines ports in XIV."""
fc_targets = []
LOG.debug(host_bunch.get('name'))
if self._get_connection_type() == storage.XIV_CONNECTION_TYPE_ISCSI:
self._define_iscsi(host_bunch)
else:
fc_targets = self._define_fc(host_bunch)
fc_targets = list(set(fc_targets))
fc_targets.sort(key=self._sort_last_digit)
return fc_targets
def _get_pool_domain(self, connector):
pool_name = self._get_backend_pool()
LOG.debug("pool name from configuration: %s", pool_name)
domain = None
try:
domain = self._call_xiv_xcli(
"pool_list", pool=pool_name).as_list[0].get('domain')
LOG.debug("Pool's domain: %s", domain)
except AttributeError:
pass
return domain
@proxy._trace_time
def _define_host(self, connector):
"""Defines a host in XIV."""
domain = self._get_pool_domain(connector)
host_bunch = self._get_bunch_from_host(connector)
host = self._call_xiv_xcli(
"host_list", host=host_bunch['name']).as_list
connection_type = self._get_connection_type()
if len(host) == 0:
LOG.debug("Non existing host, defining")
host = self._define_host_according_to_chap(
host=host_bunch['name'], in_domain=domain)
host_bunch = self._get_bunch_from_host(connector,
host.get('id'))
else:
host_bunch = self._get_bunch_from_host(connector,
host[0].get('id'))
LOG.debug("Generating hostname for connector %(conn)s",
{'conn': connector})
generated_hostname = storage.get_host_or_create_from_iqn(
connector, connection=connection_type)
generated_host = self._call_xiv_xcli(
"host_list",
host=generated_hostname).as_list
if len(generated_host) == 0:
host = self._define_host_according_to_chap(
host=generated_hostname,
in_domain=domain)
else:
host = generated_host[0]
host_bunch = self._get_bunch_from_host(
connector, host.get('id'), host_name=generated_hostname)
LOG.debug("The host_bunch: %s", host_bunch)
return host_bunch
@proxy._trace_time
def _define_fc(self, host_bunch):
"""Define FC Connectivity."""
fc_targets = []
if len(host_bunch.get('wwpns')) > 0:
connected_wwpns = []
for wwpn in host_bunch.get('wwpns'):
component_ids = list(set(
[p.component_id for p in
self._call_xiv_xcli(
"fc_connectivity_list",
wwpn=wwpn.replace(":", ""))]))
wwpn_fc_target_lists = []
for component in component_ids:
wwpn_fc_target_lists += [fc_p.wwpn for fc_p in
self._call_xiv_xcli(
"fc_port_list",
fcport=component)]
LOG.debug("got %(tgts)s fc targets for wwpn %(wwpn)s",
{'tgts': wwpn_fc_target_lists, 'wwpn': wwpn})
if len(wwpn_fc_target_lists) > 0:
connected_wwpns += [wwpn]
fc_targets += wwpn_fc_target_lists
LOG.debug("adding fc port %s", wwpn)
self._call_xiv_xcli(
"host_add_port", host=host_bunch.get('name'),
fcaddress=wwpn)
if len(connected_wwpns) == 0:
LOG.error(CONNECTIVITY_FC_NO_TARGETS)
all_target_ports = self._get_all_target_ports()
fc_targets = list(set([target.get('wwpn')
for target in all_target_ports]))
else:
msg = _("No Fibre Channel HBA's are defined on the host.")
LOG.error(msg)
raise self._get_exception()(msg)
return fc_targets
@proxy._trace_time
def _define_iscsi(self, host_bunch):
"""Add iscsi ports."""
if host_bunch.get('initiator'):
LOG.debug("adding iscsi")
self._call_xiv_xcli(
"host_add_port", host=host_bunch.get('name'),
iscsi_name=host_bunch.get('initiator'))
else:
msg = _("No iSCSI initiator found!")
LOG.error(msg)
raise self._get_exception()(msg)
@proxy._trace_time
def _event_service_start(self):
"""Send an event when cinder service starts."""
LOG.debug("send event SERVICE_STARTED")
service_start_evnt_prop = {
"openstack_version": self.meta['openstack_version'],
"pool_name": self._get_backend_pool()}
ev_mgr = events.EventsManager(self.ibm_storage_cli,
OPENSTACK_PRODUCT_NAME,
self.full_version)
ev_mgr.send_event('SERVICE_STARTED', service_start_evnt_prop)
@proxy._trace_time
def _event_volume_attached(self):
"""Send an event when volume is attached to host."""
LOG.debug("send event VOLUME_ATTACHED")
compute_host_name = socket.getfqdn()
vol_attach_evnt_prop = {
"openstack_version": self.meta['openstack_version'],
"pool_name": self._get_backend_pool(),
"compute_hostname": compute_host_name}
ev_mgr = events.EventsManager(self.ibm_storage_cli,
OPENSTACK_PRODUCT_NAME,
self.full_version)
ev_mgr.send_event('VOLUME_ATTACHED', vol_attach_evnt_prop)
@proxy._trace_time
def _build_initiator_target_map(self, fc_targets, connector):
"""Build the target_wwns and the initiator target map."""
init_targ_map = {}
wwpns = connector.get('wwpns', [])
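        # Resulting shape: {'<initiator wwpn>': [<target wwpns>], ...} --
        # every initiator is mapped to the full list of FC targets.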
for initiator in wwpns:
init_targ_map[initiator] = fc_targets
LOG.debug("_build_initiator_target_map: %(init_targ_map)s",
{'init_targ_map': init_targ_map})
return init_targ_map
@proxy._trace_time
def _get_host_and_fc_targets(self, volume, connector):
"""Returns the host and its FC targets."""
LOG.debug("_get_host_and_fc_targets %(volume)s",
{'volume': volume['name']})
fc_targets = []
host = self._get_host(connector)
if not host:
host = self._define_host(connector)
fc_targets = self._define_ports(host)
elif self._get_connection_type() == storage.XIV_CONNECTION_TYPE_FC:
fc_targets = self._get_fc_targets(host)
if len(fc_targets) == 0:
LOG.error(CONNECTIVITY_FC_NO_TARGETS)
raise self._get_exception()(CONNECTIVITY_FC_NO_TARGETS)
return (fc_targets, host)
def _vol_map_and_get_lun_id(self, volume, connector, host):
"""Maps volume to instance.
Maps a volume to the nova volume node as host,
and return the created lun id
"""
vol_name = volume['name']
LOG.debug("_vol_map_and_get_lun_id %(volume)s",
{'volume': vol_name})
try:
mapped_vols = self._call_xiv_xcli(
"vol_mapping_list",
vol=vol_name).as_dict('host')
if host['name'] in mapped_vols:
LOG.info("Volume '%(volume)s' was already attached to "
"the host '%(host)s'.",
{'host': host['name'],
'volume': volume['name']})
return int(mapped_vols[host['name']].lun)
except errors.VolumeBadNameError:
LOG.error("Volume not found. '%s'", volume['name'])
raise self.meta['exception'].VolumeNotFound(volume_id=volume['id'])
used_luns = [int(mapped.get('lun')) for mapped in
self._call_xiv_xcli(
"mapping_list",
host=host['name']).as_list]
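        # Pick the first LUN id in [MIN_LUNID, MAX_LUNID) that is not
        # already mapped on this host.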
luns = six.moves.xrange(MIN_LUNID, MAX_LUNID) # pylint: disable=E1101
for lun_id in luns:
if lun_id not in used_luns:
self._call_xiv_xcli(
"map_vol",
lun=lun_id,
host=host['name'],
vol=vol_name)
self._event_volume_attached()
return lun_id
msg = _("All free LUN IDs were already mapped.")
LOG.error(msg)
raise self._get_exception()(msg)
@proxy._trace_time
def _get_all_target_ports(self):
all_target_ports = []
fc_port_list = self._call_xiv_xcli("fc_port_list")
all_target_ports += ([t for t in fc_port_list if
t.get('wwpn') != '0000000000000000' and
t.get('role') == 'Target' and
t.get('port_state') == 'Online'])
return all_target_ports
@proxy._trace_time
def _get_fc_targets(self, host):
"""Get FC targets
:host: A dictionary describing the host
:returns: array of FC target WWPNs
"""
target_wwpns = []
all_target_ports = self._get_all_target_ports()
if host:
            host_connect_list = self._call_xiv_xcli("host_connectivity_list",
                                                    host=host.get('name'))
            for connection in host_connect_list:
fc_port = connection.get('local_fc_port')
target_wwpns += (
[target.get('wwpn') for target in all_target_ports if
target.get('component_id') == fc_port])
if not target_wwpns:
            LOG.debug('No FC targets found accessible to host: %s. Returning'
                      ' the list of all available FC targets', host)
target_wwpns = ([target.get('wwpn')
for target in all_target_ports])
fc_targets = list(set(target_wwpns))
fc_targets.sort(key=self._sort_last_digit)
LOG.debug("fc_targets : %s", fc_targets)
return fc_targets
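    # Sort key helper: orders WWPNs by their last character, presumably
    # to distribute connections across ports in a stable order.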
def _sort_last_digit(self, a):
return a[-1:]
@proxy._trace_time
def _get_xcli(self, xcli, backend_id):
"""Wrapper around XCLI to ensure that connection is up."""
if self.meta['bypass_connection_check']:
LOG.debug("_get_xcli(bypass mode)")
else:
if not xcli.is_connected():
xcli = self._init_xcli(backend_id)
return xcli
@proxy._trace_time
def _call_xiv_xcli(self, method, *args, **kwargs):
"""Wrapper around XCLI to call active storage."""
self.ibm_storage_cli = self._get_xcli(
self.ibm_storage_cli, self.active_backend_id)
if self.ibm_storage_cli:
LOG.info("_call_xiv_xcli #1: %s", method)
else:
LOG.debug("_call_xiv_xcli #2: %s", method)
return getattr(self.ibm_storage_cli.cmd, method)(*args, **kwargs)
@proxy._trace_time
def _call_remote_xiv_xcli(self, method, *args, **kwargs):
"""Wrapper around XCLI to call remote storage."""
remote_id = self._get_secondary_backend_id()
if not remote_id:
raise self._get_exception()(_("No remote backend found."))
self.ibm_storage_remote_cli = self._get_xcli(
self.ibm_storage_remote_cli, remote_id)
LOG.debug("_call_remote_xiv_xcli: %s", method)
return getattr(self.ibm_storage_remote_cli.cmd, method)(
*args,
**kwargs)
def _verify_xiv_flags(self, address, user, password):
"""Verify that the XIV flags were passed."""
if not user or not password:
raise self._get_exception()(_("No credentials found."))
if not address:
raise self._get_exception()(_("No host found."))
def _get_connection_params(self, backend_id=strings.PRIMARY_BACKEND_ID):
"""Get connection parameters.
returns a tuple containing address list, user, password,
according to backend_id
"""
if not backend_id or backend_id == strings.PRIMARY_BACKEND_ID:
if self._get_management_ips():
address = [e.strip(" ") for e in self.storage_info[
storage.FLAG_KEYS['management_ips']].split(",")]
else:
address = self.storage_info[storage.FLAG_KEYS['address']]
user = self.storage_info[storage.FLAG_KEYS['user']]
password = self.storage_info[storage.FLAG_KEYS['password']]
else:
params = self._get_target_params(backend_id)
if not params:
msg = (_("Missing target information for target '%(target)s'"),
{'target': backend_id})
LOG.error(msg)
raise self.meta['exception'].VolumeBackendAPIException(
data=msg)
if params.get('management_ips', None):
address = [e.strip(" ") for e in
params['management_ips'].split(",")]
else:
address = params['san_ip']
user = params['san_login']
password = params['san_password']
return (address, user, password)
@proxy._trace_time
def _init_xcli(self, backend_id=strings.PRIMARY_BACKEND_ID):
"""Initilize XCLI connection.
returns an XCLIClient object
"""
try:
address, user, password = self._get_connection_params(backend_id)
except Exception as e:
details = self._get_code_and_status_or_message(e)
            ex_details = (SETUP_BASE_ERROR %
                          {'title': strings.TITLE, 'details': details})
LOG.error(ex_details)
raise self.meta['exception'].InvalidParameterValue(
(_("%(prefix)s %(ex_details)s") %
{'prefix': storage.XIV_LOG_PREFIX,
'ex_details': ex_details}))
self._verify_xiv_flags(address, user, password)
try:
clear_pass = cryptish.decrypt(password)
except TypeError:
            ex_details = (SETUP_BASE_ERROR %
                          {'title': strings.TITLE,
                           'details': "Invalid password."})
LOG.error(ex_details)
raise self.meta['exception'].InvalidParameterValue(
(_("%(prefix)s %(ex_details)s") %
{'prefix': storage.XIV_LOG_PREFIX,
'ex_details': ex_details}))
certs = certificate.CertificateCollector()
path = certs.collect_certificate()
try:
LOG.debug('connect_multiendpoint_ssl with: %s', address)
xcli = client.XCLIClient.connect_multiendpoint_ssl(
user,
clear_pass,
address,
ca_certs=path)
except errors.CredentialsError:
LOG.error(SETUP_BASE_ERROR,
{'title': strings.TITLE,
'details': "Invalid credentials."})
raise self.meta['exception'].NotAuthorized()
except (errors.ConnectionError, transports.ClosedTransportError):
            err_msg = (SETUP_INVALID_ADDRESS % {'address': address})
LOG.error(err_msg)
raise self.meta['exception'].HostNotFound(host=err_msg)
except Exception as er:
err_msg = (SETUP_BASE_ERROR %
{'title': strings.TITLE, 'details': er})
LOG.error(err_msg)
raise self._get_exception()(err_msg)
finally:
certs.free_certificate()
        return xcli
operation_deser.rs
operation_deser.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
#[allow(clippy::unnecessary_wraps)]
pub fn parse_accept_administrator_invitation_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AcceptAdministratorInvitationOutput,
crate::error::AcceptAdministratorInvitationError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::AcceptAdministratorInvitationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::AcceptAdministratorInvitationError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::AcceptAdministratorInvitationError {
meta: generic,
kind: crate::error::AcceptAdministratorInvitationErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptAdministratorInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::AcceptAdministratorInvitationError {
meta: generic,
kind: crate::error::AcceptAdministratorInvitationErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptAdministratorInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::AcceptAdministratorInvitationError {
meta: generic,
kind: crate::error::AcceptAdministratorInvitationErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptAdministratorInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::AcceptAdministratorInvitationError {
meta: generic,
kind: crate::error::AcceptAdministratorInvitationErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptAdministratorInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::AcceptAdministratorInvitationError {
meta: generic,
kind: crate::error::AcceptAdministratorInvitationErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AcceptAdministratorInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
_ => crate::error::AcceptAdministratorInvitationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_accept_administrator_invitation_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::AcceptAdministratorInvitationOutput,
crate::error::AcceptAdministratorInvitationError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::accept_administrator_invitation_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_accept_invitation_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::AcceptInvitationOutput, crate::error::AcceptInvitationError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::AcceptInvitationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::AcceptInvitationError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::AcceptInvitationError {
meta: generic,
kind: crate::error::AcceptInvitationErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::AcceptInvitationError {
meta: generic,
kind: crate::error::AcceptInvitationErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::AcceptInvitationError {
meta: generic,
kind: crate::error::AcceptInvitationErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::AcceptInvitationError {
meta: generic,
kind: crate::error::AcceptInvitationErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::AcceptInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::AcceptInvitationError {
meta: generic,
kind: crate::error::AcceptInvitationErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::AcceptInvitationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::AcceptInvitationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_accept_invitation_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::AcceptInvitationOutput, crate::error::AcceptInvitationError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::accept_invitation_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_disable_standards_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchDisableStandardsOutput,
crate::error::BatchDisableStandardsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::BatchDisableStandardsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::BatchDisableStandardsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::BatchDisableStandardsError {
meta: generic,
kind: crate::error::BatchDisableStandardsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchDisableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::BatchDisableStandardsError {
meta: generic,
kind: crate::error::BatchDisableStandardsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchDisableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::BatchDisableStandardsError {
meta: generic,
kind: crate::error::BatchDisableStandardsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchDisableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::BatchDisableStandardsError {
meta: generic,
kind: crate::error::BatchDisableStandardsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchDisableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::BatchDisableStandardsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_disable_standards_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchDisableStandardsOutput,
crate::error::BatchDisableStandardsError,
> {
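    // Unlike the Accept*Invitation outputs above, this output carries a JSON
    // body, so the builder is populated by the operation deserializer before
    // `build()`.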
Ok({
#[allow(unused_mut)]
let mut output = crate::output::batch_disable_standards_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_batch_disable_standards(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchDisableStandardsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_enable_standards_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchEnableStandardsOutput,
crate::error::BatchEnableStandardsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::BatchEnableStandardsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::BatchEnableStandardsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::BatchEnableStandardsError {
meta: generic,
kind: crate::error::BatchEnableStandardsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchEnableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::BatchEnableStandardsError {
meta: generic,
kind: crate::error::BatchEnableStandardsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchEnableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::BatchEnableStandardsError {
meta: generic,
kind: crate::error::BatchEnableStandardsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchEnableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::BatchEnableStandardsError {
meta: generic,
kind: crate::error::BatchEnableStandardsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchEnableStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::BatchEnableStandardsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_enable_standards_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchEnableStandardsOutput,
crate::error::BatchEnableStandardsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::batch_enable_standards_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_batch_enable_standards(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchEnableStandardsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_import_findings_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchImportFindingsOutput,
crate::error::BatchImportFindingsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::BatchImportFindingsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::BatchImportFindingsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::BatchImportFindingsError {
meta: generic,
kind: crate::error::BatchImportFindingsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchImportFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::BatchImportFindingsError {
meta: generic,
kind: crate::error::BatchImportFindingsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchImportFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::BatchImportFindingsError {
meta: generic,
kind: crate::error::BatchImportFindingsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchImportFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::BatchImportFindingsError {
meta: generic,
kind: crate::error::BatchImportFindingsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchImportFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::BatchImportFindingsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_import_findings_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchImportFindingsOutput,
crate::error::BatchImportFindingsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::batch_import_findings_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_batch_import_findings(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchImportFindingsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_update_findings_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchUpdateFindingsOutput,
crate::error::BatchUpdateFindingsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::BatchUpdateFindingsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::BatchUpdateFindingsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::BatchUpdateFindingsError {
meta: generic,
kind: crate::error::BatchUpdateFindingsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchUpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::BatchUpdateFindingsError {
meta: generic,
kind: crate::error::BatchUpdateFindingsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchUpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::BatchUpdateFindingsError {
meta: generic,
kind: crate::error::BatchUpdateFindingsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchUpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::BatchUpdateFindingsError {
meta: generic,
kind: crate::error::BatchUpdateFindingsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchUpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::BatchUpdateFindingsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_batch_update_findings_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::BatchUpdateFindingsOutput,
crate::error::BatchUpdateFindingsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::batch_update_findings_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_batch_update_findings(
response.body().as_ref(),
output,
)
.map_err(crate::error::BatchUpdateFindingsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_action_target_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::CreateActionTargetOutput,
crate::error::CreateActionTargetError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::CreateActionTargetError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::CreateActionTargetError {
meta: generic,
kind: crate::error::CreateActionTargetErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::CreateActionTargetError {
meta: generic,
kind: crate::error::CreateActionTargetErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::CreateActionTargetError {
meta: generic,
kind: crate::error::CreateActionTargetErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::CreateActionTargetError {
meta: generic,
kind: crate::error::CreateActionTargetErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceConflictException" => crate::error::CreateActionTargetError {
meta: generic,
kind: crate::error::CreateActionTargetErrorKind::ResourceConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_conflict_exception::Builder::default();
let _ = response;
output =
crate::json_deser::deser_structure_resource_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::CreateActionTargetError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_action_target_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::CreateActionTargetOutput,
crate::error::CreateActionTargetError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::create_action_target_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_create_action_target(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateActionTargetError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_insight_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateInsightOutput, crate::error::CreateInsightError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::CreateInsightError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::CreateInsightError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::CreateInsightError {
meta: generic,
kind: crate::error::CreateInsightErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::CreateInsightError {
meta: generic,
kind: crate::error::CreateInsightErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::CreateInsightError {
meta: generic,
kind: crate::error::CreateInsightErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::CreateInsightError {
meta: generic,
kind: crate::error::CreateInsightErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceConflictException" => crate::error::CreateInsightError {
meta: generic,
kind: crate::error::CreateInsightErrorKind::ResourceConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_conflict_exception::Builder::default();
let _ = response;
output =
crate::json_deser::deser_structure_resource_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::CreateInsightError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_insight_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateInsightOutput, crate::error::CreateInsightError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::create_insight_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_create_insight(response.body().as_ref(), output)
.map_err(crate::error::CreateInsightError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_members_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateMembersOutput, crate::error::CreateMembersError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::CreateMembersError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::CreateMembersError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::CreateMembersError {
meta: generic,
kind: crate::error::CreateMembersErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::CreateMembersError {
meta: generic,
kind: crate::error::CreateMembersErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::CreateMembersError {
meta: generic,
kind: crate::error::CreateMembersErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::CreateMembersError {
meta: generic,
kind: crate::error::CreateMembersErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceConflictException" => crate::error::CreateMembersError {
meta: generic,
kind: crate::error::CreateMembersErrorKind::ResourceConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_conflict_exception::Builder::default();
let _ = response;
output =
crate::json_deser::deser_structure_resource_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::CreateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::CreateMembersError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_create_members_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::CreateMembersOutput, crate::error::CreateMembersError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::create_members_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_create_members(response.body().as_ref(), output)
.map_err(crate::error::CreateMembersError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_decline_invitations_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeclineInvitationsOutput,
crate::error::DeclineInvitationsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DeclineInvitationsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeclineInvitationsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DeclineInvitationsError {
meta: generic,
kind: crate::error::DeclineInvitationsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeclineInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DeclineInvitationsError {
meta: generic,
kind: crate::error::DeclineInvitationsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeclineInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DeclineInvitationsError {
meta: generic,
kind: crate::error::DeclineInvitationsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeclineInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DeclineInvitationsError {
meta: generic,
kind: crate::error::DeclineInvitationsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeclineInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DeclineInvitationsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_decline_invitations_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeclineInvitationsOutput,
crate::error::DeclineInvitationsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::decline_invitations_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_decline_invitations(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeclineInvitationsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_action_target_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeleteActionTargetOutput,
crate::error::DeleteActionTargetError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DeleteActionTargetError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteActionTargetError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DeleteActionTargetError {
meta: generic,
kind: crate::error::DeleteActionTargetErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DeleteActionTargetError {
meta: generic,
kind: crate::error::DeleteActionTargetErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DeleteActionTargetError {
meta: generic,
kind: crate::error::DeleteActionTargetErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DeleteActionTargetError {
meta: generic,
kind: crate::error::DeleteActionTargetErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DeleteActionTargetError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_action_target_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DeleteActionTargetOutput,
crate::error::DeleteActionTargetError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::delete_action_target_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_delete_action_target(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteActionTargetError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_insight_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteInsightOutput, crate::error::DeleteInsightError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DeleteInsightError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteInsightError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DeleteInsightError {
meta: generic,
kind: crate::error::DeleteInsightErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DeleteInsightError {
meta: generic,
kind: crate::error::DeleteInsightErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DeleteInsightError {
meta: generic,
kind: crate::error::DeleteInsightErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteInsightError {
meta: generic,
kind: crate::error::DeleteInsightErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DeleteInsightError {
meta: generic,
kind: crate::error::DeleteInsightErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DeleteInsightError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_insight_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteInsightOutput, crate::error::DeleteInsightError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::delete_insight_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_delete_insight(response.body().as_ref(), output)
.map_err(crate::error::DeleteInsightError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_invitations_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteInvitationsOutput, crate::error::DeleteInvitationsError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DeleteInvitationsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteInvitationsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DeleteInvitationsError {
meta: generic,
kind: crate::error::DeleteInvitationsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DeleteInvitationsError {
meta: generic,
kind: crate::error::DeleteInvitationsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DeleteInvitationsError {
meta: generic,
kind: crate::error::DeleteInvitationsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteInvitationsError {
meta: generic,
kind: crate::error::DeleteInvitationsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DeleteInvitationsError {
meta: generic,
kind: crate::error::DeleteInvitationsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DeleteInvitationsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_invitations_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteInvitationsOutput, crate::error::DeleteInvitationsError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::delete_invitations_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_delete_invitations(response.body().as_ref(), output)
.map_err(crate::error::DeleteInvitationsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_members_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteMembersOutput, crate::error::DeleteMembersError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DeleteMembersError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DeleteMembersError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DeleteMembersError {
meta: generic,
kind: crate::error::DeleteMembersErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DeleteMembersError {
meta: generic,
kind: crate::error::DeleteMembersErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DeleteMembersError {
meta: generic,
kind: crate::error::DeleteMembersErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DeleteMembersError {
meta: generic,
kind: crate::error::DeleteMembersErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DeleteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DeleteMembersError {
meta: generic,
kind: crate::error::DeleteMembersErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DeleteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DeleteMembersError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_delete_members_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DeleteMembersOutput, crate::error::DeleteMembersError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::delete_members_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_delete_members(response.body().as_ref(), output)
.map_err(crate::error::DeleteMembersError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_action_targets_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeActionTargetsOutput,
crate::error::DescribeActionTargetsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeActionTargetsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeActionTargetsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DescribeActionTargetsError {
meta: generic,
kind: crate::error::DescribeActionTargetsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeActionTargetsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DescribeActionTargetsError {
meta: generic,
kind: crate::error::DescribeActionTargetsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeActionTargetsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeActionTargetsError {
meta: generic,
kind: crate::error::DescribeActionTargetsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeActionTargetsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeActionTargetsError {
meta: generic,
kind: crate::error::DescribeActionTargetsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeActionTargetsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DescribeActionTargetsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_action_targets_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeActionTargetsOutput,
crate::error::DescribeActionTargetsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_action_targets_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_action_targets(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeActionTargetsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_hub_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeHubOutput, crate::error::DescribeHubError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeHubError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeHubError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DescribeHubError {
meta: generic,
kind: crate::error::DescribeHubErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DescribeHubError {
meta: generic,
kind: crate::error::DescribeHubErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeHubError {
meta: generic,
kind: crate::error::DescribeHubErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DescribeHubError {
meta: generic,
kind: crate::error::DescribeHubErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeHubError {
meta: generic,
kind: crate::error::DescribeHubErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DescribeHubError::generic(generic),
})
}
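/// Parses a successful `DescribeHub` response, deserializing the JSON body into a `DescribeHubOutput`.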
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_hub_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeHubOutput, crate::error::DescribeHubError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_hub_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_hub(response.body().as_ref(), output)
.map_err(crate::error::DescribeHubError::unhandled)?;
output.build()
})
}
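/// Parses an error response for `DescribeOrganizationConfiguration`, mapping the service error code to the matching `DescribeOrganizationConfigurationError` variant.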
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_organization_configuration_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeOrganizationConfigurationOutput,
crate::error::DescribeOrganizationConfigurationError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeOrganizationConfigurationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeOrganizationConfigurationError::unhandled(generic))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DescribeOrganizationConfigurationError {
meta: generic,
kind: crate::error::DescribeOrganizationConfigurationErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => {
crate::error::DescribeOrganizationConfigurationError {
meta: generic,
kind:
crate::error::DescribeOrganizationConfigurationErrorKind::InvalidAccessException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"InvalidInputException" => {
crate::error::DescribeOrganizationConfigurationError {
meta: generic,
kind:
crate::error::DescribeOrganizationConfigurationErrorKind::InvalidInputException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
"LimitExceededException" => {
crate::error::DescribeOrganizationConfigurationError {
meta: generic,
kind:
crate::error::DescribeOrganizationConfigurationErrorKind::LimitExceededException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
_ => crate::error::DescribeOrganizationConfigurationError::generic(generic),
})
}
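/// Parses a successful `DescribeOrganizationConfiguration` response, deserializing the JSON body into a `DescribeOrganizationConfigurationOutput`.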
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_organization_configuration_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeOrganizationConfigurationOutput,
crate::error::DescribeOrganizationConfigurationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::describe_organization_configuration_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_organization_configuration(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeOrganizationConfigurationError::unhandled)?;
output.build()
})
}
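/// Parses an error response for `DescribeProducts`, mapping the service error code to the matching `DescribeProductsError` variant.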
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_products_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeProductsOutput, crate::error::DescribeProductsError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeProductsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeProductsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DescribeProductsError {
meta: generic,
kind: crate::error::DescribeProductsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeProductsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DescribeProductsError {
meta: generic,
kind: crate::error::DescribeProductsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeProductsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeProductsError {
meta: generic,
kind: crate::error::DescribeProductsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeProductsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DescribeProductsError {
meta: generic,
kind: crate::error::DescribeProductsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeProductsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeProductsError::generic(generic),
})
}
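/// Parses a successful `DescribeProducts` response, deserializing the JSON body into a `DescribeProductsOutput`.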
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_products_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeProductsOutput, crate::error::DescribeProductsError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_products_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_describe_products(response.body().as_ref(), output)
.map_err(crate::error::DescribeProductsError::unhandled)?;
output.build()
})
}
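/// Parses an error response for `DescribeStandards`, mapping the service error code to the matching `DescribeStandardsError` variant.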
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_standards_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeStandardsOutput, crate::error::DescribeStandardsError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeStandardsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DescribeStandardsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DescribeStandardsError {
meta: generic,
kind: crate::error::DescribeStandardsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DescribeStandardsError {
meta: generic,
kind: crate::error::DescribeStandardsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeStandardsError {
meta: generic,
kind: crate::error::DescribeStandardsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DescribeStandardsError::generic(generic),
})
}
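/// Parses a successful `DescribeStandards` response, deserializing the JSON body into a `DescribeStandardsOutput`.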
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_standards_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::DescribeStandardsOutput, crate::error::DescribeStandardsError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_standards_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_describe_standards(response.body().as_ref(), output)
.map_err(crate::error::DescribeStandardsError::unhandled)?;
output.build()
})
}
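/// Parses an error response for `DescribeStandardsControls`, mapping the service error code to the matching `DescribeStandardsControlsError` variant.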
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_standards_controls_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeStandardsControlsOutput,
crate::error::DescribeStandardsControlsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DescribeStandardsControlsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DescribeStandardsControlsError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DescribeStandardsControlsError {
meta: generic,
kind: crate::error::DescribeStandardsControlsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsControlsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DescribeStandardsControlsError {
meta: generic,
kind: crate::error::DescribeStandardsControlsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsControlsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DescribeStandardsControlsError {
meta: generic,
kind: crate::error::DescribeStandardsControlsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsControlsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DescribeStandardsControlsError {
meta: generic,
kind: crate::error::DescribeStandardsControlsErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DescribeStandardsControlsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
_ => crate::error::DescribeStandardsControlsError::generic(generic),
})
}
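/// Parses a successful `DescribeStandardsControls` response, deserializing the JSON body into a `DescribeStandardsControlsOutput`.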
#[allow(clippy::unnecessary_wraps)]
pub fn parse_describe_standards_controls_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DescribeStandardsControlsOutput,
crate::error::DescribeStandardsControlsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::describe_standards_controls_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_describe_standards_controls(
response.body().as_ref(),
output,
)
.map_err(crate::error::DescribeStandardsControlsError::unhandled)?;
output.build()
})
}
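/// Parses an error response for `DisableImportFindingsForProduct`, mapping the service error code to the matching `DisableImportFindingsForProductError` variant.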
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_import_findings_for_product_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableImportFindingsForProductOutput,
crate::error::DisableImportFindingsForProductError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisableImportFindingsForProductError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DisableImportFindingsForProductError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DisableImportFindingsForProductError {
meta: generic,
kind: crate::error::DisableImportFindingsForProductErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DisableImportFindingsForProductError {
meta: generic,
kind: crate::error::DisableImportFindingsForProductErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DisableImportFindingsForProductError {
meta: generic,
kind: crate::error::DisableImportFindingsForProductErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DisableImportFindingsForProductError {
meta: generic,
kind: crate::error::DisableImportFindingsForProductErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::DisableImportFindingsForProductError {
meta: generic,
kind: crate::error::DisableImportFindingsForProductErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
_ => crate::error::DisableImportFindingsForProductError::generic(generic),
})
}
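/// Handles a successful `DisableImportFindingsForProduct` response; no fields are deserialized from the body, so the default output builder is used.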
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_import_findings_for_product_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableImportFindingsForProductOutput,
crate::error::DisableImportFindingsForProductError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::disable_import_findings_for_product_output::Builder::default();
let _ = response;
output.build()
})
}
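/// Parses an error response for `DisableOrganizationAdminAccount`, mapping the service error code to the matching `DisableOrganizationAdminAccountError` variant.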
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_organization_admin_account_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableOrganizationAdminAccountOutput,
crate::error::DisableOrganizationAdminAccountError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisableOrganizationAdminAccountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DisableOrganizationAdminAccountError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DisableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::DisableOrganizationAdminAccountErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DisableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::DisableOrganizationAdminAccountErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DisableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::DisableOrganizationAdminAccountErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DisableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::DisableOrganizationAdminAccountErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::DisableOrganizationAdminAccountError::generic(generic),
})
}
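/// Handles a successful `DisableOrganizationAdminAccount` response; no fields are deserialized from the body, so the default output builder is used.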
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_organization_admin_account_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableOrganizationAdminAccountOutput,
crate::error::DisableOrganizationAdminAccountError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::disable_organization_admin_account_output::Builder::default();
let _ = response;
output.build()
})
}
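/// Parses an error response for `DisableSecurityHub`, mapping the service error code to the matching `DisableSecurityHubError` variant.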
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_security_hub_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableSecurityHubOutput,
crate::error::DisableSecurityHubError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisableSecurityHubError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DisableSecurityHubError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DisableSecurityHubError {
meta: generic,
kind: crate::error::DisableSecurityHubErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DisableSecurityHubError {
meta: generic,
kind: crate::error::DisableSecurityHubErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DisableSecurityHubError {
meta: generic,
kind: crate::error::DisableSecurityHubErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DisableSecurityHubError {
meta: generic,
kind: crate::error::DisableSecurityHubErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DisableSecurityHubError::generic(generic),
})
}
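/// Handles a successful `DisableSecurityHub` response; no fields are deserialized from the body, so the default output builder is used.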
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disable_security_hub_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisableSecurityHubOutput,
crate::error::DisableSecurityHubError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::disable_security_hub_output::Builder::default();
let _ = response;
output.build()
})
}
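/// Parses an error response for `DisassociateFromAdministratorAccount`, mapping the service error code to the matching `DisassociateFromAdministratorAccountError` variant.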
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_from_administrator_account_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateFromAdministratorAccountOutput,
crate::error::DisassociateFromAdministratorAccountError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisassociateFromAdministratorAccountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DisassociateFromAdministratorAccountError::unhandled(generic))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DisassociateFromAdministratorAccountError { meta: generic, kind: crate::error::DisassociateFromAdministratorAccountErrorKind::InternalException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateFromAdministratorAccountError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
"InvalidAccessException" => crate::error::DisassociateFromAdministratorAccountError { meta: generic, kind: crate::error::DisassociateFromAdministratorAccountErrorKind::InvalidAccessException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateFromAdministratorAccountError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
"InvalidInputException" => crate::error::DisassociateFromAdministratorAccountError { meta: generic, kind: crate::error::DisassociateFromAdministratorAccountErrorKind::InvalidInputException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateFromAdministratorAccountError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
"LimitExceededException" => crate::error::DisassociateFromAdministratorAccountError { meta: generic, kind: crate::error::DisassociateFromAdministratorAccountErrorKind::LimitExceededException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateFromAdministratorAccountError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
"ResourceNotFoundException" => crate::error::DisassociateFromAdministratorAccountError { meta: generic, kind: crate::error::DisassociateFromAdministratorAccountErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]let mut tmp =
{
#[allow(unused_mut)]let mut output = crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateFromAdministratorAccountError::unhandled)?;
output.build()
}
;
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
})},
_ => crate::error::DisassociateFromAdministratorAccountError::generic(generic)
})
}
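/// Handles a successful `DisassociateFromAdministratorAccount` response; no fields are deserialized from the body, so the default output builder is used.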
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_from_administrator_account_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateFromAdministratorAccountOutput,
crate::error::DisassociateFromAdministratorAccountError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::disassociate_from_administrator_account_output::Builder::default();
let _ = response;
output.build()
})
}
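/// Parses an error response for `DisassociateFromMasterAccount`, mapping the service error code to the matching `DisassociateFromMasterAccountError` variant.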
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_from_master_account_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateFromMasterAccountOutput,
crate::error::DisassociateFromMasterAccountError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisassociateFromMasterAccountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::DisassociateFromMasterAccountError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DisassociateFromMasterAccountError {
meta: generic,
kind: crate::error::DisassociateFromMasterAccountErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateFromMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DisassociateFromMasterAccountError {
meta: generic,
kind: crate::error::DisassociateFromMasterAccountErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateFromMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DisassociateFromMasterAccountError {
meta: generic,
kind: crate::error::DisassociateFromMasterAccountErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateFromMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DisassociateFromMasterAccountError {
meta: generic,
kind: crate::error::DisassociateFromMasterAccountErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateFromMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::DisassociateFromMasterAccountError {
meta: generic,
kind: crate::error::DisassociateFromMasterAccountErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateFromMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
_ => crate::error::DisassociateFromMasterAccountError::generic(generic),
})
}
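/// Handles a successful `DisassociateFromMasterAccount` response; no fields are deserialized from the body, so the default output builder is used.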
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_from_master_account_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateFromMasterAccountOutput,
crate::error::DisassociateFromMasterAccountError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::disassociate_from_master_account_output::Builder::default();
let _ = response;
output.build()
})
}
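/// Parses an error response for `DisassociateMembers`, mapping the service error code to the matching `DisassociateMembersError` variant.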
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_members_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateMembersOutput,
crate::error::DisassociateMembersError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::DisassociateMembersError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::DisassociateMembersError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::DisassociateMembersError {
meta: generic,
kind: crate::error::DisassociateMembersErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::DisassociateMembersError {
meta: generic,
kind: crate::error::DisassociateMembersErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::DisassociateMembersError {
meta: generic,
kind: crate::error::DisassociateMembersErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::DisassociateMembersError {
meta: generic,
kind: crate::error::DisassociateMembersErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::DisassociateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::DisassociateMembersError {
meta: generic,
kind: crate::error::DisassociateMembersErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::DisassociateMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::DisassociateMembersError::generic(generic),
})
}
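/// Handles a successful `DisassociateMembers` response; no fields are deserialized from the body, so the default output builder is used.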
#[allow(clippy::unnecessary_wraps)]
pub fn parse_disassociate_members_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::DisassociateMembersOutput,
crate::error::DisassociateMembersError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::disassociate_members_output::Builder::default();
let _ = response;
output.build()
})
}
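/// Parses an error response for `EnableImportFindingsForProduct`, mapping the service error code to the matching `EnableImportFindingsForProductError` variant.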
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_import_findings_for_product_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::EnableImportFindingsForProductOutput,
crate::error::EnableImportFindingsForProductError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::EnableImportFindingsForProductError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::EnableImportFindingsForProductError {
meta: generic,
kind: crate::error::EnableImportFindingsForProductErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::EnableImportFindingsForProductError {
meta: generic,
kind: crate::error::EnableImportFindingsForProductErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::EnableImportFindingsForProductError {
meta: generic,
kind: crate::error::EnableImportFindingsForProductErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::EnableImportFindingsForProductError {
meta: generic,
kind: crate::error::EnableImportFindingsForProductErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceConflictException" => {
crate::error::EnableImportFindingsForProductError {
meta: generic,
kind:
crate::error::EnableImportFindingsForProductErrorKind::ResourceConflictException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_conflict_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_conflict_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
}
}
_ => crate::error::EnableImportFindingsForProductError::generic(generic),
})
}
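/// Parses a successful `EnableImportFindingsForProduct` response, deserializing the JSON body into an `EnableImportFindingsForProductOutput`.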
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_import_findings_for_product_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::EnableImportFindingsForProductOutput,
crate::error::EnableImportFindingsForProductError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::enable_import_findings_for_product_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_enable_import_findings_for_product(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableImportFindingsForProductError::unhandled)?;
output.build()
})
}
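/// Parses an error response for `EnableOrganizationAdminAccount`, mapping the service error code to the matching `EnableOrganizationAdminAccountError` variant.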
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_organization_admin_account_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::EnableOrganizationAdminAccountOutput,
crate::error::EnableOrganizationAdminAccountError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::EnableOrganizationAdminAccountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::EnableOrganizationAdminAccountError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::EnableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::EnableOrganizationAdminAccountErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::EnableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::EnableOrganizationAdminAccountErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::EnableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::EnableOrganizationAdminAccountErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::EnableOrganizationAdminAccountError {
meta: generic,
kind: crate::error::EnableOrganizationAdminAccountErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableOrganizationAdminAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::EnableOrganizationAdminAccountError::generic(generic),
})
}
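/// Handles a successful `EnableOrganizationAdminAccount` response; no fields are deserialized from the body, so the default output builder is used.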
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_organization_admin_account_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::EnableOrganizationAdminAccountOutput,
crate::error::EnableOrganizationAdminAccountError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::enable_organization_admin_account_output::Builder::default();
let _ = response;
output.build()
})
}
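/// Parses an error response for `EnableSecurityHub`, mapping the service error code (including `AccessDeniedException`) to the matching `EnableSecurityHubError` variant.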
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_security_hub_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::EnableSecurityHubOutput, crate::error::EnableSecurityHubError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::EnableSecurityHubError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::EnableSecurityHubError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"AccessDeniedException" => crate::error::EnableSecurityHubError {
meta: generic,
kind: crate::error::EnableSecurityHubErrorKind::AccessDeniedException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::access_denied_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_access_denied_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InternalException" => crate::error::EnableSecurityHubError {
meta: generic,
kind: crate::error::EnableSecurityHubErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::EnableSecurityHubError {
meta: generic,
kind: crate::error::EnableSecurityHubErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::EnableSecurityHubError {
meta: generic,
kind: crate::error::EnableSecurityHubErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceConflictException" => crate::error::EnableSecurityHubError {
meta: generic,
kind: crate::error::EnableSecurityHubErrorKind::ResourceConflictException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::resource_conflict_exception::Builder::default();
let _ = response;
output =
crate::json_deser::deser_structure_resource_conflict_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::EnableSecurityHubError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::EnableSecurityHubError::generic(generic),
})
}
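/// Handles a successful `EnableSecurityHub` response; no fields are deserialized from the body, so the default output builder is used.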
#[allow(clippy::unnecessary_wraps)]
pub fn parse_enable_security_hub_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::EnableSecurityHubOutput, crate::error::EnableSecurityHubError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::enable_security_hub_output::Builder::default();
let _ = response;
output.build()
})
}
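/// Parses an error response for `GetAdministratorAccount`, mapping the service error code to the matching `GetAdministratorAccountError` variant.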
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_administrator_account_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetAdministratorAccountOutput,
crate::error::GetAdministratorAccountError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetAdministratorAccountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::GetAdministratorAccountError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetAdministratorAccountError {
meta: generic,
kind: crate::error::GetAdministratorAccountErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetAdministratorAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetAdministratorAccountError {
meta: generic,
kind: crate::error::GetAdministratorAccountErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetAdministratorAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetAdministratorAccountError {
meta: generic,
kind: crate::error::GetAdministratorAccountErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetAdministratorAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetAdministratorAccountError {
meta: generic,
kind: crate::error::GetAdministratorAccountErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetAdministratorAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetAdministratorAccountError {
meta: generic,
kind: crate::error::GetAdministratorAccountErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetAdministratorAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::GetAdministratorAccountError::generic(generic),
})
}
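/// Parses a successful `GetAdministratorAccount` response, deserializing the JSON body into a `GetAdministratorAccountOutput`.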
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_administrator_account_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetAdministratorAccountOutput,
crate::error::GetAdministratorAccountError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_administrator_account_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_administrator_account(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetAdministratorAccountError::unhandled)?;
output.build()
})
}
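/// Parses an error response for `GetEnabledStandards`, mapping the service error code to the matching `GetEnabledStandardsError` variant.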
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_enabled_standards_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetEnabledStandardsOutput,
crate::error::GetEnabledStandardsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetEnabledStandardsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetEnabledStandardsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetEnabledStandardsError {
meta: generic,
kind: crate::error::GetEnabledStandardsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetEnabledStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetEnabledStandardsError {
meta: generic,
kind: crate::error::GetEnabledStandardsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetEnabledStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetEnabledStandardsError {
meta: generic,
kind: crate::error::GetEnabledStandardsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetEnabledStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetEnabledStandardsError {
meta: generic,
kind: crate::error::GetEnabledStandardsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetEnabledStandardsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetEnabledStandardsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_enabled_standards_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetEnabledStandardsOutput,
crate::error::GetEnabledStandardsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_enabled_standards_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_enabled_standards(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetEnabledStandardsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_findings_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetFindingsOutput, crate::error::GetFindingsError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetFindingsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetFindingsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetFindingsError {
meta: generic,
kind: crate::error::GetFindingsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetFindingsError {
meta: generic,
kind: crate::error::GetFindingsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetFindingsError {
meta: generic,
kind: crate::error::GetFindingsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetFindingsError {
meta: generic,
kind: crate::error::GetFindingsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetFindingsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_findings_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetFindingsOutput, crate::error::GetFindingsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_findings_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_findings(response.body().as_ref(), output)
.map_err(crate::error::GetFindingsError::unhandled)?;
output.build()
})
}
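/// Maps failed `GetInsightResults` responses onto the operation's five
/// modeled error variants (internal, invalid access, invalid input, limit
/// exceeded, resource not found), dispatching on the error code recovered
/// from the generic JSON envelope.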
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_insight_results_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetInsightResultsOutput, crate::error::GetInsightResultsError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetInsightResultsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetInsightResultsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetInsightResultsError {
meta: generic,
kind: crate::error::GetInsightResultsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightResultsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetInsightResultsError {
meta: generic,
kind: crate::error::GetInsightResultsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightResultsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetInsightResultsError {
meta: generic,
kind: crate::error::GetInsightResultsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightResultsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetInsightResultsError {
meta: generic,
kind: crate::error::GetInsightResultsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightResultsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetInsightResultsError {
meta: generic,
kind: crate::error::GetInsightResultsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetInsightResultsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::GetInsightResultsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_insight_results_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetInsightResultsOutput, crate::error::GetInsightResultsError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_insight_results_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_insight_results(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightResultsError::unhandled)?;
output.build()
})
}
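// A minimal, hand-rolled sketch of driving one of these error parsers
// directly. The status line, `X-Amzn-Errortype` header, and JSON body below
// are hypothetical, but they follow the restJson1 conventions this service
// uses (the error code may be taken from that header or from the body's
// `__type`/`code` field, so the sketch supplies both).
#[cfg(test)]
mod get_insight_results_error_sketch {
    #[test]
    fn maps_resource_not_found() {
        // Hypothetical 404 carrying the error code in both places.
        let response = http::Response::builder()
            .status(404)
            .header("X-Amzn-Errortype", "ResourceNotFoundException")
            .body(bytes::Bytes::from_static(
                br#"{"__type":"ResourceNotFoundException","Message":"no such insight"}"#,
            ))
            .expect("static parts always form a valid response");
        let err = super::parse_get_insight_results_error(&response)
            .expect_err("a modeled error code must map to Err");
        // The dispatch above should have selected the modeled variant.
        assert!(matches!(
            err.kind,
            crate::error::GetInsightResultsErrorKind::ResourceNotFoundException(_)
        ));
    }
}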
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_insights_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetInsightsOutput, crate::error::GetInsightsError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetInsightsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetInsightsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetInsightsError {
meta: generic,
kind: crate::error::GetInsightsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetInsightsError {
meta: generic,
kind: crate::error::GetInsightsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetInsightsError {
meta: generic,
kind: crate::error::GetInsightsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetInsightsError {
meta: generic,
kind: crate::error::GetInsightsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetInsightsError {
meta: generic,
kind: crate::error::GetInsightsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetInsightsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::GetInsightsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_insights_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetInsightsOutput, crate::error::GetInsightsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_insights_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_insights(response.body().as_ref(), output)
.map_err(crate::error::GetInsightsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_invitations_count_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetInvitationsCountOutput,
crate::error::GetInvitationsCountError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetInvitationsCountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetInvitationsCountError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetInvitationsCountError {
meta: generic,
kind: crate::error::GetInvitationsCountErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInvitationsCountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetInvitationsCountError {
meta: generic,
kind: crate::error::GetInvitationsCountErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInvitationsCountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetInvitationsCountError {
meta: generic,
kind: crate::error::GetInvitationsCountErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
                    )
                    .map_err(crate::error::GetInvitationsCountError::unhandled)?;
                    output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetInvitationsCountError {
meta: generic,
kind: crate::error::GetInvitationsCountErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInvitationsCountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::GetInvitationsCountError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_invitations_count_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::GetInvitationsCountOutput,
crate::error::GetInvitationsCountError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_invitations_count_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_invitations_count(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetInvitationsCountError::unhandled)?;
output.build()
})
}
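// A companion happy-path sketch: a hypothetical 200 response whose body is
// handed straight to the success parser. Only successful parsing is asserted,
// since the exact output field types are owned by the generated model.
#[cfg(test)]
mod get_invitations_count_response_sketch {
    #[test]
    fn parses_well_formed_body() {
        // Hypothetical body using the service's documented member name.
        let response = http::Response::builder()
            .status(200)
            .body(bytes::Bytes::from_static(br#"{"InvitationsCount":3}"#))
            .expect("static parts always form a valid response");
        let output = super::parse_get_invitations_count_response(&response)
            .expect("a well-formed JSON body should deserialize");
        // `output` is a GetInvitationsCountOutput built from the body above.
        let _ = output;
    }
}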
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_master_account_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMasterAccountOutput, crate::error::GetMasterAccountError>
{
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetMasterAccountError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetMasterAccountError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetMasterAccountError {
meta: generic,
kind: crate::error::GetMasterAccountErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetMasterAccountError {
meta: generic,
kind: crate::error::GetMasterAccountErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetMasterAccountError {
meta: generic,
kind: crate::error::GetMasterAccountErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetMasterAccountError {
meta: generic,
kind: crate::error::GetMasterAccountErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetMasterAccountError {
meta: generic,
kind: crate::error::GetMasterAccountErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetMasterAccountError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::GetMasterAccountError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_master_account_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMasterAccountOutput, crate::error::GetMasterAccountError>
{
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_master_account_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_get_master_account(response.body().as_ref(), output)
.map_err(crate::error::GetMasterAccountError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_members_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMembersOutput, crate::error::GetMembersError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::GetMembersError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::GetMembersError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::GetMembersError {
meta: generic,
kind: crate::error::GetMembersErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::GetMembersError {
meta: generic,
kind: crate::error::GetMembersErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::GetMembersError {
meta: generic,
kind: crate::error::GetMembersErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::GetMembersError {
meta: generic,
kind: crate::error::GetMembersErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::GetMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::GetMembersError {
meta: generic,
kind: crate::error::GetMembersErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::GetMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::GetMembersError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_get_members_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::GetMembersOutput, crate::error::GetMembersError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::get_members_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_get_members(response.body().as_ref(), output)
.map_err(crate::error::GetMembersError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_invite_members_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::InviteMembersOutput, crate::error::InviteMembersError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::InviteMembersError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::InviteMembersError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::InviteMembersError {
meta: generic,
kind: crate::error::InviteMembersErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::InviteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::InviteMembersError {
meta: generic,
kind: crate::error::InviteMembersErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::InviteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::InviteMembersError {
meta: generic,
kind: crate::error::InviteMembersErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::InviteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::InviteMembersError {
meta: generic,
kind: crate::error::InviteMembersErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::InviteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::InviteMembersError {
meta: generic,
kind: crate::error::InviteMembersErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::InviteMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::InviteMembersError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_invite_members_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::InviteMembersOutput, crate::error::InviteMembersError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::invite_members_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_invite_members(response.body().as_ref(), output)
.map_err(crate::error::InviteMembersError::unhandled)?;
output.build()
})
}
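/// Maps failed `ListEnabledProductsForImport` responses onto their modeled
/// errors. Note that this operation models a smaller error set than most of
/// its neighbors: only `InternalException`, `InvalidAccessException`, and
/// `LimitExceededException` are dispatched; anything else becomes the
/// generic unhandled variant.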
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_enabled_products_for_import_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListEnabledProductsForImportOutput,
crate::error::ListEnabledProductsForImportError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListEnabledProductsForImportError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListEnabledProductsForImportError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::ListEnabledProductsForImportError {
meta: generic,
kind: crate::error::ListEnabledProductsForImportErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEnabledProductsForImportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::ListEnabledProductsForImportError {
meta: generic,
kind: crate::error::ListEnabledProductsForImportErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEnabledProductsForImportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::ListEnabledProductsForImportError {
meta: generic,
kind: crate::error::ListEnabledProductsForImportErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEnabledProductsForImportError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListEnabledProductsForImportError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_enabled_products_for_import_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListEnabledProductsForImportOutput,
crate::error::ListEnabledProductsForImportError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_enabled_products_for_import_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_enabled_products_for_import(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListEnabledProductsForImportError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_invitations_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListInvitationsOutput, crate::error::ListInvitationsError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListInvitationsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListInvitationsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::ListInvitationsError {
meta: generic,
kind: crate::error::ListInvitationsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::ListInvitationsError {
meta: generic,
kind: crate::error::ListInvitationsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListInvitationsError {
meta: generic,
kind: crate::error::ListInvitationsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::ListInvitationsError {
meta: generic,
kind: crate::error::ListInvitationsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListInvitationsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListInvitationsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_invitations_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListInvitationsOutput, crate::error::ListInvitationsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_invitations_output::Builder::default();
let _ = response;
output =
crate::json_deser::deser_operation_list_invitations(response.body().as_ref(), output)
.map_err(crate::error::ListInvitationsError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_members_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListMembersOutput, crate::error::ListMembersError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListMembersError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListMembersError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::ListMembersError {
meta: generic,
kind: crate::error::ListMembersErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::ListMembersError {
meta: generic,
kind: crate::error::ListMembersErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListMembersError {
meta: generic,
kind: crate::error::ListMembersErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::ListMembersError {
meta: generic,
kind: crate::error::ListMembersErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListMembersError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListMembersError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_members_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::ListMembersOutput, crate::error::ListMembersError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_members_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_members(response.body().as_ref(), output)
.map_err(crate::error::ListMembersError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_organization_admin_accounts_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListOrganizationAdminAccountsOutput,
crate::error::ListOrganizationAdminAccountsError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListOrganizationAdminAccountsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::ListOrganizationAdminAccountsError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::ListOrganizationAdminAccountsError {
meta: generic,
kind: crate::error::ListOrganizationAdminAccountsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListOrganizationAdminAccountsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::ListOrganizationAdminAccountsError {
meta: generic,
kind: crate::error::ListOrganizationAdminAccountsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListOrganizationAdminAccountsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListOrganizationAdminAccountsError {
meta: generic,
kind: crate::error::ListOrganizationAdminAccountsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListOrganizationAdminAccountsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::ListOrganizationAdminAccountsError {
meta: generic,
kind: crate::error::ListOrganizationAdminAccountsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListOrganizationAdminAccountsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::ListOrganizationAdminAccountsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_organization_admin_accounts_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListOrganizationAdminAccountsOutput,
crate::error::ListOrganizationAdminAccountsError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_organization_admin_accounts_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_organization_admin_accounts(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListOrganizationAdminAccountsError::unhandled)?;
output.build()
})
}
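// Note on the `List*` operations in this stretch (`ListEnabledProductsForImport`,
// `ListInvitations`, `ListMembers`, `ListOrganizationAdminAccounts`): each
// success parser deserializes a single page, and the output carries the
// service's continuation token (`NextToken` on the wire) that callers pass
// back to fetch the next page.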
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::ListTagsForResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::ListTagsForResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::ListTagsForResourceError {
meta: generic,
kind: crate::error::ListTagsForResourceErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::ListTagsForResourceError {
meta: generic,
kind: crate::error::ListTagsForResourceErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::ListTagsForResourceError {
meta: generic,
kind: crate::error::ListTagsForResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::ListTagsForResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_list_tags_for_resource_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::ListTagsForResourceOutput,
crate::error::ListTagsForResourceError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::list_tags_for_resource_output::Builder::default();
let _ = response;
output = crate::json_deser::deser_operation_list_tags_for_resource(
response.body().as_ref(),
output,
)
.map_err(crate::error::ListTagsForResourceError::unhandled)?;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::TagResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::TagResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::TagResourceError {
meta: generic,
kind: crate::error::TagResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::TagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::TagResourceError::generic(generic),
})
}
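/// Builds the empty `TagResourceOutput`. `TagResource` models no output
/// members, so the body is never read; `let _ = response;` only silences the
/// unused-parameter lint. This is also where `#[allow(clippy::unnecessary_wraps)]`
/// earns its keep: the parser cannot fail, but codegen keeps the uniform
/// `Result` signature.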
#[allow(clippy::unnecessary_wraps)]
pub fn parse_tag_resource_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::TagResourceOutput, crate::error::TagResourceError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::tag_resource_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UntagResourceError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UntagResourceError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::UntagResourceError {
meta: generic,
kind: crate::error::UntagResourceErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::UntagResourceError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::UntagResourceError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_untag_resource_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UntagResourceOutput, crate::error::UntagResourceError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::untag_resource_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_action_target_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateActionTargetOutput,
crate::error::UpdateActionTargetError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UpdateActionTargetError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UpdateActionTargetError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UpdateActionTargetError {
meta: generic,
kind: crate::error::UpdateActionTargetErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::UpdateActionTargetError {
meta: generic,
kind: crate::error::UpdateActionTargetErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UpdateActionTargetError {
meta: generic,
kind: crate::error::UpdateActionTargetErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::UpdateActionTargetError {
meta: generic,
kind: crate::error::UpdateActionTargetErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::UpdateActionTargetError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::UpdateActionTargetError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_action_target_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateActionTargetOutput,
crate::error::UpdateActionTargetError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_action_target_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_findings_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateFindingsOutput, crate::error::UpdateFindingsError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UpdateFindingsError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UpdateFindingsError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UpdateFindingsError {
meta: generic,
kind: crate::error::UpdateFindingsErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::UpdateFindingsError {
meta: generic,
kind: crate::error::UpdateFindingsErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UpdateFindingsError {
meta: generic,
kind: crate::error::UpdateFindingsErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::UpdateFindingsError {
meta: generic,
kind: crate::error::UpdateFindingsErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::UpdateFindingsError {
meta: generic,
kind: crate::error::UpdateFindingsErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::UpdateFindingsError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::UpdateFindingsError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_findings_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateFindingsOutput, crate::error::UpdateFindingsError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_findings_output::Builder::default();
let _ = response;
output.build()
})
}
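// A minimal dispatch sketch (hypothetical; `classify_update_findings` is not
// part of this generated file) showing how the paired response/error parsers
// above are typically selected on HTTP status:
//
// fn classify_update_findings(
//     response: &http::Response<bytes::Bytes>,
// ) -> std::result::Result<crate::output::UpdateFindingsOutput, crate::error::UpdateFindingsError>
// {
//     if response.status().is_success() {
//         parse_update_findings_response(response)
//     } else {
//         parse_update_findings_error(response)
//     }
// }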
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_insight_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateInsightOutput, crate::error::UpdateInsightError> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UpdateInsightError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UpdateInsightError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UpdateInsightError {
meta: generic,
kind: crate::error::UpdateInsightErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::UpdateInsightError {
meta: generic,
kind: crate::error::UpdateInsightErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UpdateInsightError {
meta: generic,
kind: crate::error::UpdateInsightErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::UpdateInsightError {
meta: generic,
kind: crate::error::UpdateInsightErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::UpdateInsightError {
meta: generic,
kind: crate::error::UpdateInsightErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::UpdateInsightError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::UpdateInsightError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_insight_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<crate::output::UpdateInsightOutput, crate::error::UpdateInsightError> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_insight_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_organization_configuration_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateOrganizationConfigurationOutput,
crate::error::UpdateOrganizationConfigurationError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UpdateOrganizationConfigurationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UpdateOrganizationConfigurationError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UpdateOrganizationConfigurationError {
meta: generic,
kind: crate::error::UpdateOrganizationConfigurationErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::UpdateOrganizationConfigurationError {
meta: generic,
kind: crate::error::UpdateOrganizationConfigurationErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UpdateOrganizationConfigurationError {
meta: generic,
kind: crate::error::UpdateOrganizationConfigurationErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::UpdateOrganizationConfigurationError {
meta: generic,
kind: crate::error::UpdateOrganizationConfigurationErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateOrganizationConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
_ => crate::error::UpdateOrganizationConfigurationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_organization_configuration_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateOrganizationConfigurationOutput,
crate::error::UpdateOrganizationConfigurationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::update_organization_configuration_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_security_hub_configuration_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateSecurityHubConfigurationOutput,
crate::error::UpdateSecurityHubConfigurationError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UpdateSecurityHubConfigurationError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => return Err(crate::error::UpdateSecurityHubConfigurationError::unhandled(generic)),
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UpdateSecurityHubConfigurationError {
meta: generic,
kind: crate::error::UpdateSecurityHubConfigurationErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateSecurityHubConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::UpdateSecurityHubConfigurationError {
meta: generic,
kind: crate::error::UpdateSecurityHubConfigurationErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateSecurityHubConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UpdateSecurityHubConfigurationError {
meta: generic,
kind: crate::error::UpdateSecurityHubConfigurationErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateSecurityHubConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"LimitExceededException" => crate::error::UpdateSecurityHubConfigurationError {
meta: generic,
kind: crate::error::UpdateSecurityHubConfigurationErrorKind::LimitExceededException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::limit_exceeded_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_limit_exceeded_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateSecurityHubConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => crate::error::UpdateSecurityHubConfigurationError {
meta: generic,
kind: crate::error::UpdateSecurityHubConfigurationErrorKind::ResourceNotFoundException(
{
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::UpdateSecurityHubConfigurationError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
},
),
},
_ => crate::error::UpdateSecurityHubConfigurationError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_security_hub_configuration_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateSecurityHubConfigurationOutput,
crate::error::UpdateSecurityHubConfigurationError,
> {
Ok({
#[allow(unused_mut)]
let mut output =
crate::output::update_security_hub_configuration_output::Builder::default();
let _ = response;
output.build()
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_standards_control_error(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateStandardsControlOutput,
crate::error::UpdateStandardsControlError,
> {
let generic = crate::json_deser::parse_generic_error(&response)
.map_err(crate::error::UpdateStandardsControlError::unhandled)?;
let error_code = match generic.code() {
Some(code) => code,
None => {
return Err(crate::error::UpdateStandardsControlError::unhandled(
generic,
))
}
};
let _error_message = generic.message().map(|msg| msg.to_owned());
Err(match error_code {
"InternalException" => crate::error::UpdateStandardsControlError {
meta: generic,
kind: crate::error::UpdateStandardsControlErrorKind::InternalException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::internal_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_internal_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateStandardsControlError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidAccessException" => crate::error::UpdateStandardsControlError {
meta: generic,
kind: crate::error::UpdateStandardsControlErrorKind::InvalidAccessException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_access_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_access_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateStandardsControlError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"InvalidInputException" => crate::error::UpdateStandardsControlError {
meta: generic,
kind: crate::error::UpdateStandardsControlErrorKind::InvalidInputException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output = crate::error::invalid_input_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_invalid_input_exceptionjson_err(
response.body().as_ref(),
output,
)
.map_err(crate::error::UpdateStandardsControlError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
},
"ResourceNotFoundException" => {
crate::error::UpdateStandardsControlError {
meta: generic,
kind: crate::error::UpdateStandardsControlErrorKind::ResourceNotFoundException({
#[allow(unused_mut)]
let mut tmp = {
#[allow(unused_mut)]
let mut output =
crate::error::resource_not_found_exception::Builder::default();
let _ = response;
output = crate::json_deser::deser_structure_resource_not_found_exceptionjson_err(response.body().as_ref(), output).map_err(crate::error::UpdateStandardsControlError::unhandled)?;
output.build()
};
if (&tmp.message).is_none() {
tmp.message = _error_message;
}
tmp
}),
}
}
_ => crate::error::UpdateStandardsControlError::generic(generic),
})
}
#[allow(clippy::unnecessary_wraps)]
pub fn parse_update_standards_control_response(
response: &http::Response<bytes::Bytes>,
) -> std::result::Result<
crate::output::UpdateStandardsControlOutput,
crate::error::UpdateStandardsControlError,
> {
Ok({
#[allow(unused_mut)]
let mut output = crate::output::update_standards_control_output::Builder::default();
let _ = response;
output.build()
})
} | .map_err(crate::error::GetInvitationsCountError::unhandled)?; |
balances.py | #!/usr/bin/env python3
# Copyright (c) 2018-2021 The MobileCoin Foundation
"""
The purpose of this script is to print the balances for all keys in
a given account directory.
Example setup and usage:
```
python3 balances.py --key-dir ../../../target/sample_data/master/keys/
```
"""
import argparse
import grpc
import mobilecoind_api_pb2
import mobilecoind_api_pb2_grpc
import os
from accounts import connect, load_key_and_register
from google.protobuf.empty_pb2 import Empty
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--mobilecoind-host",
default="localhost",
type=str,
help="Mobilecoind host")
parser.add_argument("--mobilecoind-port",
default="4444",
type=str,
help="Mobilecoind port")
parser.add_argument("--key-dir",
required=True,
type=str,
help="Path to account key dir")
parser.add_argument("--prune",
action="store_true",
help="Prune key files for accounts with 0 balance")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(args)
stub = connect(args.mobilecoind_host, args.mobilecoind_port)
block_count = stub.GetLedgerInfo(Empty()).block_count | filter(lambda x: x.endswith(".json"), os.listdir(args.key_dir))):
print(keyfile)
account_data = load_key_and_register(
os.path.join(args.key_dir, keyfile), stub)
# Get starting balance
request = mobilecoind_api_pb2.GetMonitorStatusRequest(monitor_id=account_data.monitor_id)
monitor_block = stub.GetMonitorStatus(request).status.next_block
if block_count != monitor_block:
print(f"\tAccount not synced.")
else:
resp = stub.GetBalance(
mobilecoind_api_pb2.GetBalanceRequest(monitor_id=account_data.monitor_id))
balance = resp.balance
total += balance
print(f"\tBalance: {resp.balance:,}")
            # Prune key files for accounts with a balance of 0 (FIXME MC-367: also remove them from the mobilecoind wallet)
if int(balance) == 0 and args.prune:
os.remove(os.path.join(args.key_dir, keyfile))
print(f"Total balance of key collection: {total:,} PicoMob") | total = 0
for keyfile in sorted( |
state.rs | use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use cosmwasm_std::{CanonicalAddr, Storage};
use cosmwasm_storage::{singleton, singleton_read, ReadonlySingleton, Singleton};
| pub struct State {
pub count: i32,
pub owner: CanonicalAddr,
}
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct ContractInfo {
pub name: String,
pub symbol: String,
}
pub fn config<S: Storage>(storage: &mut S) -> Singleton<S, State> {
singleton(storage, CONFIG_KEY)
}
pub fn config_read<S: Storage>(storage: &S) -> ReadonlySingleton<S, State> {
singleton_read(storage, CONFIG_KEY)
} | pub static CONFIG_KEY: &[u8] = b"config";
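// A usage sketch (hypothetical; assumes `storage: S` with `S: Storage` and an
// `owner: CanonicalAddr` in scope) for the `config`/`config_read` accessors in
// this file:
//
// config(&mut storage).save(&State { count: 0, owner })?;
// let state: State = config_read(&storage).load()?;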
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] |
ColorfulNumbers.js | function colorfulNumbers(n) {
console.log('<ul>')
    for (let i = 1; i <= n; i++) {
        if (i % 2 !== 0) {
            console.log(` <li><span style='color:green'>${i}</span></li>`) | else {
            console.log(` <li><span style='color:blue'>${i}</span></li>`)
}
}
console.log('</ul>')
}
colorfulNumbers(10) | } |
operations.rs | #![doc = "generated by AutoRust"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
#![allow(clippy::redundant_clone)]
use super::models;
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
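// A construction sketch (hypothetical; `credential` is assumed to be an
// Arc<dyn azure_core::auth::TokenCredential> obtained elsewhere):
//
// let client = ClientBuilder::new(credential)
//     .endpoint(DEFAULT_ENDPOINT)
//     .build();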
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::auth::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> azure_core::error::Result<azure_core::Response> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(
endpoint: impl Into<String>,
credential: std::sync::Arc<dyn azure_core::auth::TokenCredential>,
scopes: Vec<String>,
) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn agent_pools(&self) -> agent_pools::Client {
agent_pools::Client(self.clone())
}
pub fn maintenance_configurations(&self) -> maintenance_configurations::Client {
maintenance_configurations::Client(self.clone())
}
pub fn managed_cluster_snapshots(&self) -> managed_cluster_snapshots::Client {
managed_cluster_snapshots::Client(self.clone())
}
pub fn managed_clusters(&self) -> managed_clusters::Client {
managed_clusters::Client(self.clone())
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn private_endpoint_connections(&self) -> private_endpoint_connections::Client {
private_endpoint_connections::Client(self.clone())
}
pub fn private_link_resources(&self) -> private_link_resources::Client {
private_link_resources::Client(self.clone())
}
pub fn resolve_private_link_service_id(&self) -> resolve_private_link_service_id::Client {
resolve_private_link_service_id::Client(self.clone())
}
pub fn snapshots(&self) -> snapshots::Client {
snapshots::Client(self.clone())
}
pub fn trusted_access_role_bindings(&self) -> trusted_access_role_bindings::Client {
trusted_access_role_bindings::Client(self.clone())
}
pub fn trusted_access_roles(&self) -> trusted_access_roles::Client {
trusted_access_roles::Client(self.clone())
}
}
pub mod operations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of operations."]
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::OperationListResult;
#[derive(Clone)]
pub struct | {
pub(crate) client: super::super::Client,
}
impl Builder {
#[doc = "only the first response will be fetched as the continuation token is not part of the response schema"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!("{}/providers/Microsoft.ContainerService/operations", this.client.endpoint(),);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::OperationListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
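// A call-site sketch for the list builder above (hypothetical; assumes an
// async context and a `client` built via `ClientBuilder`):
//
// let ops: models::OperationListResult =
//     client.operations().list().into_future().await?;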
pub mod managed_clusters {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets supported OS options in the specified subscription."]
pub fn get_os_options(&self, subscription_id: impl Into<String>, location: impl Into<String>) -> get_os_options::Builder {
get_os_options::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
location: location.into(),
resource_type: None,
}
}
#[doc = "Gets a list of managed clusters in the specified subscription."]
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Lists managed clusters in the specified subscription and resource group."]
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
}
}
#[doc = "Gets the upgrade profile of a managed cluster."]
pub fn get_upgrade_profile(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> get_upgrade_profile::Builder {
get_upgrade_profile::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Gets an access profile of a managed cluster."]
pub fn get_access_profile(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
role_name: impl Into<String>,
) -> get_access_profile::Builder {
get_access_profile::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
role_name: role_name.into(),
}
}
#[doc = "Lists the admin credentials of a managed cluster."]
pub fn list_cluster_admin_credentials(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list_cluster_admin_credentials::Builder {
list_cluster_admin_credentials::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
server_fqdn: None,
}
}
#[doc = "Lists the user credentials of a managed cluster."]
pub fn list_cluster_user_credentials(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list_cluster_user_credentials::Builder {
list_cluster_user_credentials::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
server_fqdn: None,
format: None,
}
}
#[doc = "Lists the cluster monitoring user credentials of a managed cluster."]
pub fn list_cluster_monitoring_user_credentials(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list_cluster_monitoring_user_credentials::Builder {
list_cluster_monitoring_user_credentials::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
server_fqdn: None,
}
}
#[doc = "Gets a managed cluster."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Creates or updates a managed cluster."]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::ManagedCluster>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Updates tags on a managed cluster."]
pub fn update_tags(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::TagsObject>,
) -> update_tags::Builder {
update_tags::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes a managed cluster."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
ignore_pod_disruption_budget: None,
}
}
#[doc = "Reset the Service Principal Profile of a managed cluster."]
pub fn reset_service_principal_profile(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::ManagedClusterServicePrincipalProfile>,
) -> reset_service_principal_profile::Builder {
reset_service_principal_profile::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Reset the AAD Profile of a managed cluster."]
pub fn reset_aad_profile(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::ManagedClusterAadProfile>,
) -> reset_aad_profile::Builder {
reset_aad_profile::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Rotates the certificates of a managed cluster."]
pub fn rotate_cluster_certificates(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> rotate_cluster_certificates::Builder {
rotate_cluster_certificates::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Rotates the service account signing keys of a managed cluster."]
pub fn rotate_service_account_signing_keys(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> rotate_service_account_signing_keys::Builder {
rotate_service_account_signing_keys::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Stops a Managed Cluster"]
pub fn stop(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> stop::Builder {
stop::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Starts a previously stopped Managed Cluster"]
pub fn start(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> start::Builder {
start::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Submits a command to run against the Managed Cluster."]
pub fn run_command(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
request_payload: impl Into<models::RunCommandRequest>,
) -> run_command::Builder {
run_command::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
request_payload: request_payload.into(),
}
}
#[doc = "Gets the results of a command which has been run on the Managed Cluster."]
pub fn get_command_result(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
command_id: impl Into<String>,
) -> get_command_result::Builder {
get_command_result::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
command_id: command_id.into(),
}
}
#[doc = "Gets a list of egress endpoints (network endpoints of all outbound dependencies) in the specified managed cluster."]
pub fn list_outbound_network_dependencies_endpoints(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list_outbound_network_dependencies_endpoints::Builder {
list_outbound_network_dependencies_endpoints::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
}
pub mod get_os_options {
use super::models;
use azure_core::error::ResultExt;
type Response = models::OsOptionProfile;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) location: String,
pub(crate) resource_type: Option<String>,
}
impl Builder {
pub fn resource_type(mut self, resource_type: impl Into<String>) -> Self {
self.resource_type = Some(resource_type.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ContainerService/locations/{}/osOptions/default",
this.client.endpoint(),
&this.subscription_id,
&this.location
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
if let Some(resource_type) = &this.resource_type {
url.query_pairs_mut().append_pair("resource-type", resource_type);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::OsOptionProfile = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ContainerService/managedClusters",
this.client.endpoint(),
&this.subscription_id
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
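    // A paging sketch (hypothetical; assumes an async context and that
    // azure_core::Pageable is consumed as a futures Stream, as in the other
    // list operations of this crate):
    //
    // use futures::stream::StreamExt;
    // let mut pages = client.managed_clusters().list(subscription_id).into_stream();
    // while let Some(page) = pages.next().await {
    //     let page: models::ManagedClusterListResult = page?;
    //     // inspect the page contents ...
    // }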
pub mod list_by_resource_group {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get_upgrade_profile {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterUpgradeProfile;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/upgradeProfiles/default",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterUpgradeProfile = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod get_access_profile {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterAccessProfile;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) role_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/accessProfiles/{}/listCredential",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.role_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterAccessProfile = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod list_cluster_admin_credentials {
use super::models;
use azure_core::error::ResultExt;
type Response = models::CredentialResults;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) server_fqdn: Option<String>,
}
impl Builder {
pub fn server_fqdn(mut self, server_fqdn: impl Into<String>) -> Self {
self.server_fqdn = Some(server_fqdn.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/listClusterAdminCredential",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
if let Some(server_fqdn) = &this.server_fqdn {
url.query_pairs_mut().append_pair("server-fqdn", server_fqdn);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::CredentialResults = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod list_cluster_user_credentials {
use super::models;
use azure_core::error::ResultExt;
type Response = models::CredentialResults;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) server_fqdn: Option<String>,
pub(crate) format: Option<String>,
}
impl Builder {
pub fn server_fqdn(mut self, server_fqdn: impl Into<String>) -> Self {
self.server_fqdn = Some(server_fqdn.into());
self
}
pub fn format(mut self, format: impl Into<String>) -> Self {
self.format = Some(format.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/listClusterUserCredential",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
if let Some(server_fqdn) = &this.server_fqdn {
url.query_pairs_mut().append_pair("server-fqdn", server_fqdn);
}
if let Some(format) = &this.format {
url.query_pairs_mut().append_pair("format", format);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::CredentialResults = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod list_cluster_monitoring_user_credentials {
use super::models;
use azure_core::error::ResultExt;
type Response = models::CredentialResults;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) server_fqdn: Option<String>,
}
impl Builder {
pub fn server_fqdn(mut self, server_fqdn: impl Into<String>) -> Self {
self.server_fqdn = Some(server_fqdn.into());
self
}
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/listClusterMonitoringUserCredential",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
if let Some(server_fqdn) = &this.server_fqdn {
url.query_pairs_mut().append_pair("server-fqdn", server_fqdn);
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::CredentialResults = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedCluster;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedCluster = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200(models::ManagedCluster),
Created201(models::ManagedCluster),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::ManagedCluster,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedCluster = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedCluster = serde_json::from_slice(&rsp_body)?;
Ok(Response::Created201(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
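    // Editorial usage sketch: `create_or_update` returns an enum because the
    // service answers 200 for an update and 201 for a create, each carrying the
    // resulting ManagedCluster. `managed_clusters()` is an assumed accessor name.
    //
    //     let cluster = match client
    //         .managed_clusters()
    //         .create_or_update(subscription_id, resource_group_name, resource_name, parameters)
    //         .into_future()
    //         .await?
    //     {
    //         create_or_update::Response::Ok200(c) | create_or_update::Response::Created201(c) => c,
    //     };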
pub mod update_tags {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedCluster;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::TagsObject,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedCluster = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) ignore_pod_disruption_budget: Option<bool>,
}
impl Builder {
pub fn ignore_pod_disruption_budget(mut self, ignore_pod_disruption_budget: bool) -> Self {
self.ignore_pod_disruption_budget = Some(ignore_pod_disruption_budget);
self
}
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
if let Some(ignore_pod_disruption_budget) = &this.ignore_pod_disruption_budget {
url.query_pairs_mut()
.append_pair("ignore-pod-disruption-budget", &ignore_pod_disruption_budget.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
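    // Editorial usage sketch: optional query parameters are set through builder
    // methods before awaiting, as with `ignore_pod_disruption_budget` here; the
    // result is either Accepted202 or NoContent204. `managed_clusters()` is an
    // assumed accessor name.
    //
    //     let rsp = client
    //         .managed_clusters()
    //         .delete(subscription_id, resource_group_name, resource_name)
    //         .ignore_pod_disruption_budget(true)
    //         .into_future()
    //         .await?;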
pub mod reset_service_principal_profile {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::ManagedClusterServicePrincipalProfile,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/resetServicePrincipalProfile",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod reset_aad_profile {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::ManagedClusterAadProfile,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/resetAADProfile",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod rotate_cluster_certificates {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/rotateClusterCertificates",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod rotate_service_account_signing_keys {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/rotateServiceAccountSigningKeys",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod stop {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/stop",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod start {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/start",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
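    // Editorial usage sketch: `stop` and `start` are POSTs with an empty body
    // (hence the explicit Content-Length: 0 header above), and only the first
    // 202/204 response is surfaced, so callers must poll the cluster state
    // themselves. `managed_clusters()` is an assumed accessor name.
    //
    //     client.managed_clusters()
    //         .stop(subscription_id.clone(), resource_group_name.clone(), resource_name.clone())
    //         .into_future()
    //         .await?;
    //     // ... wait for the cluster to reach a stopped state, then:
    //     client.managed_clusters()
    //         .start(subscription_id, resource_group_name, resource_name)
    //         .into_future()
    //         .await?;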
pub mod run_command {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
Ok200(models::RunCommandResult),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) request_payload: models::RunCommandRequest,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/runCommand",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.request_payload)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::RunCommandResult = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod get_command_result {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
Ok200(models::RunCommandResult),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) command_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/commandResults/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.command_id
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::RunCommandResult = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
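    // Editorial usage sketch: `run_command` answers 200 with a finished
    // RunCommandResult, or 202 when the command is still running; the 202 body is
    // not surfaced by this builder, so `command_id` below is an assumed, separately
    // obtained input for `get_command_result`. `managed_clusters()` is an assumed
    // accessor name.
    //
    //     match client.managed_clusters()
    //         .run_command(subscription_id.clone(), resource_group_name.clone(), resource_name.clone(), request_payload)
    //         .into_future()
    //         .await?
    //     {
    //         run_command::Response::Ok200(result) => { /* finished inline */ }
    //         run_command::Response::Accepted202 => {
    //             let _pending = client.managed_clusters()
    //                 .get_command_result(subscription_id, resource_group_name, resource_name, command_id)
    //                 .into_future()
    //                 .await?;
    //         }
    //     }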
pub mod list_outbound_network_dependencies_endpoints {
use super::models;
use azure_core::error::ResultExt;
type Response = models::OutboundEnvironmentEndpointCollection;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/outboundNetworkDependenciesEndpoints",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::OutboundEnvironmentEndpointCollection = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
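    // Editorial usage sketch: `into_stream` returns an `azure_core::Pageable`,
    // which implements `futures::Stream`; each item is one page of results, and
    // the continuation link is followed automatically by the closure above.
    // `managed_clusters()` is an assumed accessor name.
    //
    //     use futures::stream::StreamExt;
    //     let mut pages = client.managed_clusters()
    //         .list_outbound_network_dependencies_endpoints(subscription_id, resource_group_name, resource_name)
    //         .into_stream();
    //     while let Some(page) = pages.next().await {
    //         let page: models::OutboundEnvironmentEndpointCollection = page?;
    //         // inspect the endpoints in this page here
    //     }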
}
pub mod maintenance_configurations {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of maintenance configurations in the specified managed cluster."]
pub fn list_by_managed_cluster(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list_by_managed_cluster::Builder {
list_by_managed_cluster::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Gets the specified maintenance configuration of a managed cluster."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
config_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
config_name: config_name.into(),
}
}
#[doc = "Creates or updates a maintenance configuration in the specified managed cluster."]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
config_name: impl Into<String>,
parameters: impl Into<models::MaintenanceConfiguration>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
config_name: config_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes a maintenance configuration."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
config_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
config_name: config_name.into(),
}
}
}
pub mod list_by_managed_cluster {
use super::models;
use azure_core::error::ResultExt;
type Response = models::MaintenanceConfigurationListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/maintenanceConfigurations",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::MaintenanceConfigurationListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::MaintenanceConfiguration;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) config_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/maintenanceConfigurations/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.config_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::MaintenanceConfiguration = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
type Response = models::MaintenanceConfiguration;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) config_name: String,
pub(crate) parameters: models::MaintenanceConfiguration,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/maintenanceConfigurations/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.config_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::MaintenanceConfiguration = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) config_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/maintenanceConfigurations/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.config_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
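    // Editorial usage sketch: maintenance configurations are child resources
    // addressed by cluster plus `config_name`; `create_or_update` PUTs the
    // MaintenanceConfiguration body and returns the stored configuration.
    // `maintenance_configurations()` is an assumed accessor name.
    //
    //     let stored: models::MaintenanceConfiguration = client
    //         .maintenance_configurations()
    //         .create_or_update(subscription_id, resource_group_name, resource_name, config_name, parameters)
    //         .into_future()
    //         .await?;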
}
pub mod agent_pools {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of agent pools in the specified managed cluster."]
pub fn list(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Gets the specified managed cluster agent pool."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
agent_pool_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
agent_pool_name: agent_pool_name.into(),
}
}
#[doc = "Creates or updates an agent pool in the specified managed cluster."]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
agent_pool_name: impl Into<String>,
parameters: impl Into<models::AgentPool>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
agent_pool_name: agent_pool_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes an agent pool in the specified managed cluster."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
agent_pool_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
agent_pool_name: agent_pool_name.into(),
ignore_pod_disruption_budget: None,
}
}
#[doc = "Gets the upgrade profile for an agent pool."]
pub fn get_upgrade_profile(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
agent_pool_name: impl Into<String>,
) -> get_upgrade_profile::Builder {
get_upgrade_profile::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
agent_pool_name: agent_pool_name.into(),
}
}
#[doc = "Gets a list of supported Kubernetes versions for the specified agent pool."]
pub fn get_available_agent_pool_versions(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> get_available_agent_pool_versions::Builder {
get_available_agent_pool_versions::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Upgrades the node image version of an agent pool to the latest."]
pub fn upgrade_node_image_version(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
agent_pool_name: impl Into<String>,
) -> upgrade_node_image_version::Builder {
upgrade_node_image_version::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
agent_pool_name: agent_pool_name.into(),
}
}
}
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::AgentPoolListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/agentPools",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPoolListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::AgentPool;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) agent_pool_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/agentPools/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name,
&this.agent_pool_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPool = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200(models::AgentPool),
Created201(models::AgentPool),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) agent_pool_name: String,
pub(crate) parameters: models::AgentPool,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/agentPools/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name,
&this.agent_pool_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPool = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPool = serde_json::from_slice(&rsp_body)?;
Ok(Response::Created201(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
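    // Editorial usage sketch: like the managed-cluster PUT, an agent-pool
    // `create_or_update` distinguishes 200 (updated) from 201 (created), both
    // carrying the resulting AgentPool. `agent_pools()` is an assumed accessor name.
    //
    //     let pool = match client.agent_pools()
    //         .create_or_update(subscription_id, resource_group_name, resource_name, agent_pool_name, parameters)
    //         .into_future()
    //         .await?
    //     {
    //         create_or_update::Response::Ok200(p) | create_or_update::Response::Created201(p) => p,
    //     };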
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Accepted202,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) agent_pool_name: String,
pub(crate) ignore_pod_disruption_budget: Option<bool>,
}
impl Builder {
pub fn ignore_pod_disruption_budget(mut self, ignore_pod_disruption_budget: bool) -> Self {
self.ignore_pod_disruption_budget = Some(ignore_pod_disruption_budget);
self
}
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/agentPools/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name,
&this.agent_pool_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
if let Some(ignore_pod_disruption_budget) = &this.ignore_pod_disruption_budget {
url.query_pairs_mut()
.append_pair("ignore-pod-disruption-budget", &ignore_pod_disruption_budget.to_string());
}
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => Ok(Response::Accepted202),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod get_upgrade_profile {
use super::models;
use azure_core::error::ResultExt;
type Response = models::AgentPoolUpgradeProfile;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) agent_pool_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/agentPools/{}/upgradeProfiles/default",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.agent_pool_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPoolUpgradeProfile = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod get_available_agent_pool_versions {
use super::models;
use azure_core::error::ResultExt;
type Response = models::AgentPoolAvailableVersions;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/availableAgentPoolVersions",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPoolAvailableVersions = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod upgrade_node_image_version {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
Accepted202(models::AgentPool),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) agent_pool_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/agentPools/{}/upgradeNodeImageVersion",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.agent_pool_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::AgentPool = serde_json::from_slice(&rsp_body)?;
Ok(Response::Accepted202(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
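    // Usage sketch (illustrative only; not generated code). The accessor name
    // `agent_pools_client()` on the crate-level `Client` is an assumption based
    // on this crate's naming convention, and the `upgrade_node_image_version`
    // method is assumed to mirror the module name. Since the operation returns a
    // two-variant `Response`, callers should match on it:
    //
    //     match client
    //         .agent_pools_client()
    //         .upgrade_node_image_version(subscription_id, resource_group, cluster, pool)
    //         .into_future()
    //         .await?
    //     {
    //         upgrade_node_image_version::Response::Ok200 => { /* upgrade finished */ }
    //         upgrade_node_image_version::Response::Accepted202(agent_pool) => {
    //             // Long-running operation accepted; poll the agent pool for status.
    //         }
    //     }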
}
pub mod private_endpoint_connections {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of private endpoint connections in the specified managed cluster."]
pub fn list(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Gets the specified private endpoint connection."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
}
}
#[doc = "Updates a private endpoint connection."]
pub fn update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
parameters: impl Into<models::PrivateEndpointConnection>,
) -> update::Builder {
update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes a private endpoint connection."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
private_endpoint_connection_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
private_endpoint_connection_name: private_endpoint_connection_name.into(),
}
}
}
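    // Usage sketch (illustrative only; not generated code). Assuming the
    // crate-level `Client` exposes a `private_endpoint_connections_client()`
    // accessor (an assumption based on this crate's naming convention), a
    // typical call chain is:
    //
    //     let connections = client
    //         .private_endpoint_connections_client()
    //         .list("subscription-id", "resource-group", "cluster-name")
    //         .into_future()
    //         .await?;
    //     // `connections` is a models::PrivateEndpointConnectionListResult.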
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::PrivateEndpointConnectionListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/privateEndpointConnections",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::PrivateEndpointConnectionListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::PrivateEndpointConnection;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) private_endpoint_connection_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/privateEndpointConnections/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.private_endpoint_connection_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod update {
use super::models;
use azure_core::error::ResultExt;
type Response = models::PrivateEndpointConnection;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) private_endpoint_connection_name: String,
pub(crate) parameters: models::PrivateEndpointConnection,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/privateEndpointConnections/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.private_endpoint_connection_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::PrivateEndpointConnection = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) private_endpoint_connection_name: String,
}
impl Builder {
#[doc = "only the first response will be fetched as long running operations are not supported yet"]
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/privateEndpointConnections/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.private_endpoint_connection_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod private_link_resources {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of private link resources in the specified managed cluster."]
pub fn list(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
}
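    // Usage sketch (illustrative only). With the same hypothetical accessor
    // convention as above, private link resources for a cluster can be fetched as:
    //
    //     let resources = client
    //         .private_link_resources_client()
    //         .list("subscription-id", "resource-group", "cluster-name")
    //         .into_future()
    //         .await?; // models::PrivateLinkResourcesListResult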
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::PrivateLinkResourcesListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/privateLinkResources",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::PrivateLinkResourcesListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod resolve_private_link_service_id {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets the private link service ID for the specified managed cluster."]
pub fn post(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::PrivateLinkResource>,
) -> post::Builder {
post::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
}
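    // Usage sketch (illustrative only). `post` takes a
    // models::PrivateLinkResource payload; only the fields relevant to
    // resolution need to be populated (model construction is elided here, since
    // its constructor shape is not shown in this file):
    //
    //     let params: models::PrivateLinkResource = /* populated elsewhere */;
    //     let resolved = client
    //         .resolve_private_link_service_id_client() // hypothetical accessor
    //         .post("subscription-id", "resource-group", "cluster-name", params)
    //         .into_future()
    //         .await?; // models::PrivateLinkResource with the service ID resolved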
pub mod post {
use super::models;
use azure_core::error::ResultExt;
type Response = models::PrivateLinkResource;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::PrivateLinkResource,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/resolvePrivateLinkServiceId",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::PrivateLinkResource = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod snapshots {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of snapshots in the specified subscription."]
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Lists snapshots in the specified subscription and resource group."]
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
}
}
#[doc = "Gets a snapshot."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Creates or updates a snapshot."]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::Snapshot>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Updates tags on a snapshot."]
pub fn update_tags(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::TagsObject>,
) -> update_tags::Builder {
update_tags::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes a snapshot."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
}
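    // Usage sketch (illustrative only). `list` and `list_by_resource_group`
    // return an azure_core::Pageable consumed as a stream, while the other
    // operations resolve through `into_future`. Assumes `futures::StreamExt`
    // is in scope and a `snapshots_client()` accessor exists on the crate-level
    // `Client` (both assumptions):
    //
    //     let mut pages = client
    //         .snapshots_client()
    //         .list_by_resource_group("subscription-id", "resource-group")
    //         .into_stream();
    //     while let Some(page) = pages.next().await {
    //         let page = page?; // models::SnapshotListResult
    //     }
    //
    //     let snapshot = client
    //         .snapshots_client()
    //         .get("subscription-id", "resource-group", "snapshot-name")
    //         .into_future()
    //         .await?;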
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::SnapshotListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ContainerService/snapshots",
this.client.endpoint(),
&this.subscription_id
);
                        let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::SnapshotListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod list_by_resource_group {
use super::models;
use azure_core::error::ResultExt;
type Response = models::SnapshotListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/snapshots",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name
);
                        let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::SnapshotListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::Snapshot;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/snapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Snapshot = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200(models::Snapshot),
Created201(models::Snapshot),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::Snapshot,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/snapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Snapshot = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Snapshot = serde_json::from_slice(&rsp_body)?;
Ok(Response::Created201(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod update_tags {
use super::models;
use azure_core::error::ResultExt;
type Response = models::Snapshot;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::TagsObject,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/snapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::Snapshot = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/snapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod managed_cluster_snapshots {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "Gets a list of managed cluster snapshots in the specified subscription."]
pub fn list(&self, subscription_id: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
#[doc = "Lists managed cluster snapshots in the specified subscription and resource group."]
pub fn list_by_resource_group(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
) -> list_by_resource_group::Builder {
list_by_resource_group::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
}
}
#[doc = "Gets a managed cluster snapshot."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Creates or updates a managed cluster snapshot."]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::ManagedClusterSnapshot>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Updates tags on a managed cluster snapshot."]
pub fn update_tags(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
parameters: impl Into<models::TagsObject>,
) -> update_tags::Builder {
update_tags::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
parameters: parameters.into(),
}
}
#[doc = "Deletes a managed cluster snapshot."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
}
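    // Usage sketch (illustrative only). The shape mirrors the `snapshots`
    // module above: `create_or_update` returns a two-variant Response
    // distinguishing 200 from 201, which callers can match on. `mcs_client`
    // stands for this module's Client, and model construction is elided:
    //
    //     let params: models::ManagedClusterSnapshot = /* populated elsewhere */;
    //     match mcs_client
    //         .create_or_update(subscription_id, resource_group, name, params)
    //         .into_future()
    //         .await?
    //     {
    //         create_or_update::Response::Ok200(snapshot) => { /* updated */ }
    //         create_or_update::Response::Created201(snapshot) => { /* created */ }
    //     }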
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterSnapshotListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ContainerService/managedclustersnapshots",
this.client.endpoint(),
&this.subscription_id
);
                        let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterSnapshotListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod list_by_resource_group {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterSnapshotListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedclustersnapshots",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name
);
                        let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterSnapshotListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterSnapshot;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedclustersnapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterSnapshot = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200(models::ManagedClusterSnapshot),
Created201(models::ManagedClusterSnapshot),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::ManagedClusterSnapshot,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedclustersnapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterSnapshot = serde_json::from_slice(&rsp_body)?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterSnapshot = serde_json::from_slice(&rsp_body)?;
Ok(Response::Created201(rsp_value))
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod update_tags {
use super::models;
use azure_core::error::ResultExt;
type Response = models::ManagedClusterSnapshot;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) parameters: models::TagsObject,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedclustersnapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.parameters)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::ManagedClusterSnapshot = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedclustersnapshots/{}",
this.client.endpoint(),
&this.subscription_id,
&this.resource_group_name,
&this.resource_name
);
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
pub mod trusted_access_roles {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "List supported trusted access roles."]
pub fn list(&self, subscription_id: impl Into<String>, location: impl Into<String>) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
location: location.into(),
}
}
}
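    // Usage sketch (illustrative only). This is a location-scoped listing,
    // paged like the other list operations in this file. Assumes
    // `futures::StreamExt` is in scope; `roles_client` stands for this
    // module's Client:
    //
    //     let mut pages = roles_client
    //         .list("subscription-id", "eastus")
    //         .into_stream();
    //     while let Some(page) = pages.next().await {
    //         let page = page?; // models::TrustedAccessRoleListResult
    //     }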
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::TrustedAccessRoleListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) location: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.ContainerService/locations/{}/trustedAccessRoles",
this.client.endpoint(),
&this.subscription_id,
&this.location
);
                        let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::TrustedAccessRoleListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
}
pub mod trusted_access_role_bindings {
use super::models;
pub struct Client(pub(crate) super::Client);
impl Client {
#[doc = "List trusted access role bindings."]
pub fn list(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
) -> list::Builder {
list::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
}
}
#[doc = "Get a trusted access role binding."]
pub fn get(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
trusted_access_role_binding_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
trusted_access_role_binding_name: trusted_access_role_binding_name.into(),
}
}
#[doc = "Create or update a trusted access role binding"]
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
trusted_access_role_binding_name: impl Into<String>,
trusted_access_role_binding: impl Into<models::TrustedAccessRoleBinding>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
trusted_access_role_binding_name: trusted_access_role_binding_name.into(),
trusted_access_role_binding: trusted_access_role_binding.into(),
}
}
#[doc = "Delete a trusted access role binding."]
pub fn delete(
&self,
subscription_id: impl Into<String>,
resource_group_name: impl Into<String>,
resource_name: impl Into<String>,
trusted_access_role_binding_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
resource_group_name: resource_group_name.into(),
resource_name: resource_name.into(),
trusted_access_role_binding_name: trusted_access_role_binding_name.into(),
}
}
}
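    // Usage sketch (illustrative, not generated code): assuming the crate's
    // top-level `Client` exposes a `trusted_access_role_bindings_client()`
    // accessor, as these generated crates usually do, and with
    // `futures::StreamExt` in scope to drive the pager:
    //
    //     let mut pages = client
    //         .trusted_access_role_bindings_client()
    //         .list(subscription_id, resource_group_name, resource_name)
    //         .into_stream();
    //     while let Some(page) = pages.next().await {
    //         let page = page?; // models::TrustedAccessRoleBindingListResult
    //     }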
pub mod list {
use super::models;
use azure_core::error::ResultExt;
type Response = models::TrustedAccessRoleBindingListResult;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
}
impl Builder {
pub fn into_stream(self) -> azure_core::Pageable<Response, azure_core::error::Error> {
let make_request = move |continuation: Option<azure_core::prelude::Continuation>| {
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/trustedAccessRoleBindings",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::Other, "build request")?;
let mut req_builder = http::request::Builder::new();
let rsp = match continuation {
Some(token) => {
url.set_path("");
url = url
.join(&token.into_raw())
.context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let has_api_version_already = url.query_pairs().any(|(k, _)| k == "api-version");
if !has_api_version_already {
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
}
req_builder = req_builder.uri(url.as_str());
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
let req_body = azure_core::EMPTY_BODY;
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
None => {
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder =
req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
this.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?
}
};
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::TrustedAccessRoleBindingListResult = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
};
azure_core::Pageable::new(make_request)
}
}
}
pub mod get {
use super::models;
use azure_core::error::ResultExt;
type Response = models::TrustedAccessRoleBinding;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) trusted_access_role_binding_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/trustedAccessRoleBindings/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.trusted_access_role_binding_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::TrustedAccessRoleBinding = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod create_or_update {
use super::models;
use azure_core::error::ResultExt;
type Response = models::TrustedAccessRoleBinding;
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) trusted_access_role_binding_name: String,
pub(crate) trusted_access_role_binding: models::TrustedAccessRoleBinding,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/trustedAccessRoleBindings/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.trusted_access_role_binding_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(&this.trusted_access_role_binding)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await?;
let rsp_value: models::TrustedAccessRoleBinding = serde_json::from_slice(&rsp_body)?;
Ok(rsp_value)
}
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
pub mod delete {
use super::models;
use azure_core::error::ResultExt;
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) resource_group_name: String,
pub(crate) resource_name: String,
pub(crate) trusted_access_role_binding_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, azure_core::error::Result<Response>> {
Box::pin({
let this = self.clone();
async move {
                        let url_str = &format!(
                            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/managedClusters/{}/trustedAccessRoleBindings/{}",
                            this.client.endpoint(),
                            &this.subscription_id,
                            &this.resource_group_name,
                            &this.resource_name,
                            &this.trusted_access_role_binding_name
                        );
let mut url = url::Url::parse(url_str).context(azure_core::error::ErrorKind::DataConversion, "parse url")?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = this.client.token_credential();
let token_response = credential
.get_token(&this.client.scopes().join(" "))
.await
.context(azure_core::error::ErrorKind::Other, "get bearer token")?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", "2022-05-02-preview");
let req_body = azure_core::EMPTY_BODY;
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.context(azure_core::error::ErrorKind::Other, "build request")?;
let rsp = this
.client
.send(req)
.await
.context(azure_core::error::ErrorKind::Io, "execute request")?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => Ok(Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => Err(azure_core::error::Error::from(azure_core::error::ErrorKind::HttpResponse {
status: status_code.as_u16(),
error_code: None,
})),
}
}
})
}
}
}
}
| Builder |
spec.js | import React from 'react';
import ReactDOM from 'react-dom';
import { EmailInput } from '.';
it('renders without crashing', () => { | }); | const div = document.createElement('div');
ReactDOM.render(<EmailInput />, div);
ReactDOM.unmountComponentAtNode(div); |
block.py | import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
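# Resolution helpers: _upsample / _downsample double or halve both spatial
# dimensions, while the *_frq variants stretch or shrink only the second
# (frequency) axis.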
def _upsample(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def _downsample(x):
return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
def pixel_shuffle(self, x):
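        # Sub-pixel (pixel shuffle) upsampling: self.ps expands channels 4x,
        # then the reshape/transpose below folds each group of four channels
        # into a 2x2 spatial block, turning (B, 4C, H, W) into (B, C, 2H, 2W).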
out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_downsample(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
self.activation = lambda x: activation(_upsample(x))
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
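        # Minibatch discrimination (Salimans et al., 2016): project each sample
        # onto B kernels of C dimensions, take L1 distances to every other
        # sample in the batch, and append the exp(-distance) closeness scores
        # as B extra features so the discriminator can detect mode collapse.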
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
| if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h | def __call__(self, x): |
output_actions.rs | use syntax::ast;
use syntax::parse::token;
use syntax::ext::base::{DummyResult, ExtCtxt, MacEager, MacResult};
use syntax::ext::quote::rt::ToTokens;
use syntax::print::pprust::tts_to_string;
use syntax::tokenstream::TokenTree;
use codegen::IntoWriteStmt;
use simple_expr::{SimpleExpr, js_write};
use js_write::{WriteJs, JsWrite, WriteJsSimpleExpr};
pub trait WriteOutputActions {
fn write_output_actions(&self, w: &mut OutputActionWrite);
}
pub trait OutputActionWrite {
fn write_output_action(&mut self, output_action: &OutputAction);
}
pub trait IntoOutputActions {
fn into_output_actions(&self) -> Vec<OutputAction>;
}
/// Represents a type of action to perform when rendering
#[derive(Clone, Debug)]
pub enum OutputAction {
// Text and computed values
Write(String),
WriteResult(SimpleExpr),
// Elements
WriteOpen(String),
WriteClose(String),
WriteVoid(String),
}
mod output_strings {
use super::{OutputAction, WriteOutputActions};
use syntax::codemap::{DUMMY_SP, Span};
use syntax::ext::base::ExtCtxt;
use codegen::lang::{Lang, Js, Html};
use codegen::output_string_writer::{WriteOutputStrings, OutputStringWrite};
impl<S: WriteOutputActions> WriteOutputStrings<Html> for S {
fn write_output_strings<'s, 'cx>(&self, ecx: &'cx ExtCtxt, w: &'s mut OutputStringWrite<Html>) {
let mut output_actions = Vec::new();
self.write_output_actions(&mut output_actions);
for output_action in &output_actions {
output_action.write_output_strings(ecx, w);
}
}
}
impl WriteOutputStrings<Html> for OutputAction {
fn write_output_strings<'s, 'cx>(&self, ecx: &'cx ExtCtxt, w: &'s mut OutputStringWrite<Html>) {
ecx.span_warn(DUMMY_SP, &format!("Writing output action: {:?}", &self));
match self {
&OutputAction::Write(ref contents) => {
ecx.span_warn(DUMMY_SP, &format!("Writing output string for Write output action: {}", contents));
w.write_output_string(ecx, &contents);
},
&OutputAction::WriteResult(ref simple_expr) => {
ecx.span_warn(DUMMY_SP, &format!("Writing output string for WriteResult"));
&simple_expr.write_output_strings(ecx, w);
},
&OutputAction::WriteOpen(ref element_type) => {
w.write_output_string(ecx, &format!("<{}>", &element_type));
},
&OutputAction::WriteClose(ref element_type) => {
w.write_output_string(ecx, &format!("</{}>", &element_type));
},
&OutputAction::WriteVoid(ref element_type) => {
w.write_output_string(ecx, &format!("<{} />", &element_type));
}
}
}
}
}
impl OutputActionWrite for Vec<OutputAction> {
fn write_output_action(&mut self, output_action: &OutputAction) {
self.push(output_action.clone());
}
}
impl WriteJs for OutputAction {
//fn write_js<W>(&self, js: &mut W) where W: JsWrite {
fn write_js(&self, js: &mut JsWrite) {
match *self {
OutputAction::Write(ref contents) => {
js.call_method("IncrementalDOM.text", &|pl| {
pl.param(&|ex| {
ex.string_lit(&contents);
});
});
},
OutputAction::WriteResult(ref template_expr) => {
js.call_method("IncrementalDOM.text", &|pl| {
pl.param(&|ex| {
template_expr.write_js_simple_expr(ex);
});
});
},
OutputAction::WriteOpen(ref element_type) => {
js.call_method("IncrementalDOM.elementOpen", &|pl| {
pl.param(&|ex| {
ex.string_lit(&element_type);
});
});
},
OutputAction::WriteClose(ref element_type) => {
js.call_method("IncrementalDOM.elementClose", &|pl| {
pl.param(&|ex| {
ex.string_lit(&element_type);
});
});
},
OutputAction::WriteVoid(ref element_type) => {
js.call_method("IncrementalDOM.elementVoid", &|pl| {
pl.param(&|ex| {
ex.string_lit(&element_type);
});
});
}
}
}
}
impl WriteJs for Vec<OutputAction> {
fn write_js(&self, js: &mut JsWrite) {
for output_action in self { | output_action.write_js(js);
}
}
}
impl<S: WriteOutputActions> WriteJs for S {
fn write_js(&self, js: &mut JsWrite) {
let mut output_actions = Vec::new();
self.write_output_actions(&mut output_actions);
for output_action in &output_actions {
output_action.write_js(js);
}
}
} | |
unity.py | import sys
def | ():
return sys.path | get |
24.d.ts | import * as React from "react"; | export default ZoomReset24; | import { CarbonIconProps } from "../../";
declare const ZoomReset24: React.ForwardRefExoticComponent<
CarbonIconProps & React.RefAttributes<SVGSVGElement>
>; |
folium_maps.py | # Importing the necessary libraries
import pandas as pd
import geopandas as gpd
import fiona
import matplotlib.pyplot as plt
import folium
import os
from folium.plugins import StripePattern
data_dir = os.path.dirname("/home/ado/Desktop/new_datacranchers/data_crunchers_knbs/app/data_processing/open_source_data_values/folium_maps_data/")
# Loading the datasets
core_healthworkforce = gpd.read_file(os.path.join(data_dir, "core_healthworkforce.geojson"))
govt_open_late_night = gpd.read_file(os.path.join(data_dir, "govt_open_late_night.geojson"))
govt_open_public_holidays = gpd.read_file(os.path.join(data_dir, "govt_open_public_holidays.geojson"))
govt_open_weekends = gpd.read_file(os.path.join(data_dir, "govt_open_weekends.geojson"))
govt_open_whole_day = gpd.read_file(os.path.join(data_dir, 'govt_open_whole_day.geojson'))
nongovt_open_late_night = gpd.read_file(os.path.join(data_dir, "nongovt_open_late_night.geojson"))
nongovt_open_public_holidays = gpd.read_file(os.path.join(data_dir, "nongovt_open_public_holidays.geojson"))
nongovt_open_weekends = gpd.read_file(os.path.join(data_dir, "nongovt_open_weekends.geojson"))
nongovt_open_whole_day = gpd.read_file(os.path.join(data_dir, 'nongovt_open_whole_day.geojson'))
homes_with_fixed_internet = gpd.read_file(os.path.join(data_dir, "homes_fixed_with_internet.geojson"))
human_waste_disposal = gpd.read_file(os.path.join(data_dir, "human_waste_disposal.geojson"))
internet_through_mobile = gpd.read_file(os.path.join(data_dir, "internet_through_mobile.geojson"))
internet_users = gpd.read_file(os.path.join(data_dir, "internet_users.geojson"))
main_source_of_drinking_water = gpd.read_file(os.path.join(data_dir, "main_source_of_drinking_water.geojson"))
place_of_birth = gpd.read_file(os.path.join(data_dir, "place_of_birth.geojson"))
# Naming the dataframes
core_healthworkforce.name = 'core_healthworkforce'
govt_open_late_night.name = 'govt_open_late_night'
govt_open_public_holidays.name = 'govt_open_public_holidays'
govt_open_weekends.name = 'govt_open_weekends'
govt_open_whole_day.name = 'govt_open_whole_day'
nongovt_open_late_night.name = 'nongovt_open_late_night'
nongovt_open_public_holidays.name = 'nongovt_open_public_holidays'
nongovt_open_weekends.name = 'nongovt_open_weekends'
nongovt_open_whole_day.name = 'nongovt_open_whole_day'
homes_with_fixed_internet.name = 'homes_with_fixed_internet'
human_waste_disposal.name = 'human_waste_disposal'
internet_through_mobile.name = 'internet_through_mobile'
internet_users.name = 'internet_users'
main_source_of_drinking_water.name = 'main_source_of_drinking_water'
place_of_birth.name = 'place_of_birth'
# Mapping function: builds a folium choropleth for the given dataset and saves it as an HTML template.
def | (geojson_data):
    # Create the base map object centred on Kenya.
    KEN = folium.Map(location=[0.0236, 37.9062], zoom_start=7)
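    # Each dataset gets its own value column, colour ramp, output name and
    # legend title; 'objectid' is the key that joins rows to geojson features.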
if geojson_data.name == 'core_healthworkforce':
        clmn = ('objectid', '% change')
col = 'Greys'
nm = 'Healthworkforce'
lgd_name = ('Core Healthworkforce')
elif geojson_data.name == 'govt_open_late_night':
clmn = ('objectid','No')
col = 'Purples'
nm = 'Govt_Open_Late_Night'
lgd_name = ('Government Hospitals Open Late Night')
elif geojson_data.name == 'govt_open_public_holidays':
clmn = ('objectid','No')
col = 'Blues'
nm = 'Govt_Open_Public_Holidays'
lgd_name = ('Government Hospitals Open on Public Holidays')
elif geojson_data.name == 'govt_open_weekends':
clmn = ('objectid','No')
col = 'Greens'
nm = 'Govt_Open_Weekends'
lgd_name = ('Government Hospitals Open on Weekends')
elif geojson_data.name == 'govt_open_whole_day':
clmn = ('objectid','No')
col = 'Oranges'
nm = 'Govt_Open_Whole_Day'
lgd_name = ('Government Hospitals Open Whole Day')
elif geojson_data.name == 'nongovt_open_late_night':
clmn = ('objectid','No')
col = 'Reds'
nm = 'Nongovt_Open_Late_Night'
lgd_name = ('Non-Governmental Hospitals Open Late Night')
elif geojson_data.name == 'nongovt_open_public_holidays':
clmn = ('objectid','No')
col = 'YlOrBr'
nm = 'Nongovt_Open_Public_Holidays'
lgd_name = ('Non-Governmental Hospitals Open on Public Holidays')
elif geojson_data.name == 'nongovt_open_weekends':
clmn = ('objectid','No')
col = 'YlOrRd'
nm = 'Nongovt_Open_Weekends'
lgd_name = ('Non-Governmental Hospitals Open on Weekends')
elif geojson_data.name == 'nongovt_open_whole_day':
clmn = ('objectid','No')
col = 'OrRd'
nm = 'Nongovt_Open_Whole_Day'
lgd_name = ('Non-Governmental Hospitals Open Whole Day')
elif geojson_data.name == 'homes_with_fixed_internet':
clmn = ('objectid','No')
col = 'PuRd'
nm = 'Fixed_Internet'
lgd_name = ('Households with Fixed Internet at Home')
elif geojson_data.name == 'human_waste_disposal':
clmn = ('objectid','Improper')
col = 'RdPu'
nm = 'Human_Waste_Disposal'
lgd_name = ('Households Modes of Human Waste Disposal')
elif geojson_data.name == 'internet_through_mobile':
clmn = ('objectid','No')
col = 'BuPu'
nm = 'Internet_Through_Mobile'
lgd_name = ('Households that Accessed Internet Through Mobile')
elif geojson_data.name == 'internet_users':
clmn = ('objectid','No')
col = 'GnBu'
nm = 'Internet_Users'
lgd_name = ('Persons that Accessed Internet in the Last Three Months')
elif geojson_data.name == 'main_source_of_drinking_water':
clmn = ('objectid','Unsafe')
col = 'PuBu'
nm = 'Drinking_Water'
lgd_name = ('Households Main Source of Drinking Water')
else:
clmn = ('objectid','Non Health Facility')
col = 'YlGnBu'
nm = 'Place_Of_Birth'
lgd_name = ('Women who gave Birth in a Non-Health Facility')
    choropleth = folium.Choropleth(
geo_data = geojson_data,
data=geojson_data,
columns= clmn,
key_on=('feature.properties.objectid'),
fill_color=(col),
fill_opacity=0.8,
nan_fill_opacity=0.4,
line_opacity=0.5,
name= nm,
show=True,
overlay=True,
legend_name= lgd_name,
highlight=True,
nan_fill_color = "black",
reset=True
).add_to(KEN)
# Add hover functionality.
style_function = lambda x: {'fillColor': '#ffffff',
'color':'#000000',
'fillOpacity': 0.1,
'weight': 0.1}
highlight_function = lambda x: {'fillColor': '#000000',
'color':'#000000',
'fillOpacity': 0.50,
'weight': 0.1}
# Add dark and light mode.
folium.TileLayer('cartodbdark_matter',name="dark mode",control=True).add_to(KEN)
folium.TileLayer('cartodbpositron',name="light mode",control=True).add_to(KEN)
# We add a layer controller.
folium.LayerControl(collapsed=False).add_to(KEN)
children = list(geojson_data.drop(['objectid', 'geometry'], axis=1).columns)
choropleth.geojson.add_child(folium.features.GeoJsonTooltip(children, labels=True))
return KEN.save('app/templates/maps_templates/'+nm+'.html')
#lst=['core_healthworkforce.geojson','govt_open_late_night.geojson','govt_open_public_holidays.geojson','govt_open_weekends.geojson','govt_open_whole_day.geojson','homes_fixed_with_internet.geojson','human_waste_disposal.geojson','internet_through_mobile.geojson','internet_users.geojson','main_source_of_drinking_water.geojson','nongovt_open_late_night.geojson','non_govt_open_public_holidays.geojson','nongovt_open_weekends.geojson','non_govt_open_whole_day.geojson','place_of_birth.geojson']
loc = os.path.join(data_dir, 'core_healthworkforce.geojson')
file_ = gpd.read_file(loc)
mapping_func(file_)
| mapping_func |
lib.rs | mod input;
#[cfg(feature = "gamepad")]
use chargrid_gamepad::GamepadContext;
pub use chargrid_input;
pub use chargrid_input::{Input, MouseInput};
use chargrid_input::{MouseButton, ScrollDirection};
use chargrid_runtime::{app, on_frame, on_input, Component, FrameBuffer, Rgba32};
use grid_2d::Coord;
pub use grid_2d::Size;
use js_sys::Function;
use std::cell::RefCell;
use std::rc::Rc;
pub use std::time::Duration;
use wasm_bindgen::prelude::*;
use wasm_bindgen::JsCast;
use web_sys::{Element, HtmlElement, KeyboardEvent, MouseEvent, Node, WheelEvent};
fn rgba32_to_web_colour(Rgba32 { r, g, b, a }: Rgba32) -> String {
format!("rgba({},{},{},{})", r, g, b, a as f64 / 255.0)
}
struct ElementCell {
element: HtmlElement,
character: char,
bold: bool,
underline: bool,
foreground: Rgba32,
background: Rgba32,
}
impl ElementCell {
fn with_element(element: HtmlElement) -> Self {
        element.set_inner_html("&nbsp;");
let element_style = element.style();
element_style
.set_property("color", "rgb(255,255,255)")
.unwrap();
element_style
.set_property("background-color", "rgb(0,0,0)")
.unwrap();
Self {
element,
character: ' ',
bold: false,
underline: false,
foreground: Rgba32::new_grey(0),
background: Rgba32::new_grey(0),
}
}
}
#[derive(Debug)]
struct ElementDisplayInfo {
container_x: f64,
container_y: f64,
cell_width: f64,
cell_height: f64,
}
impl ElementDisplayInfo {
fn mouse_coord(&self, x: i32, y: i32) -> Coord {
let x = (x - self.container_x as i32) / self.cell_width as i32;
let y = (y - self.container_y as i32) / self.cell_height as i32;
Coord::new(x, y)
}
}
pub struct Context {
element_grid: grid_2d::Grid<ElementCell>,
chargrid_frame_buffer: FrameBuffer,
container_element: Element,
#[cfg(feature = "gamepad")]
gamepad: GamepadContext,
}
impl Context {
fn element_display_info(&self) -> ElementDisplayInfo {
let container_rect = self.container_element.get_bounding_client_rect();
let (container_x, container_y) = (container_rect.x(), container_rect.y());
let cell_element = self
.element_grid
.get_index_checked(0)
.element
.dyn_ref::<Element>()
.unwrap();
let cell_rect = cell_element.get_bounding_client_rect();
let (cell_width, cell_height) = (cell_rect.width(), cell_rect.height());
ElementDisplayInfo {
container_x,
container_y,
cell_width,
cell_height,
}
}
pub fn new(size: Size, container: &str) -> Self {
if size.width() == 0 || size.height() == 0 {
panic!("Size must not be zero");
}
let window = web_sys::window().unwrap();
let document = window.document().unwrap();
let container_node = document
.get_element_by_id(container)
.unwrap()
.dyn_into::<Node>()
.unwrap();
let element_grid = grid_2d::Grid::new_fn(size, |_| {
let element = document
.create_element("span")
.unwrap()
.dyn_into::<HtmlElement>()
.unwrap();
ElementCell::with_element(element)
});
for y in 0..size.height() {
for x in 0..size.width() {
container_node
.append_child(
&element_grid
.get_checked(Coord::new(x as i32, y as i32))
.element,
)
.unwrap();
}
container_node
.append_child(
document
.create_element("br")
.unwrap()
.dyn_ref::<HtmlElement>()
.unwrap(),
)
.unwrap();
}
let chargrid_frame_buffer = FrameBuffer::new(size);
Self {
element_grid,
chargrid_frame_buffer,
container_element: document.get_element_by_id(container).unwrap(),
#[cfg(feature = "gamepad")]
gamepad: GamepadContext::new(),
}
}
fn render_internal(&mut self) {
for (chargrid_cell, element_cell) in self
.chargrid_frame_buffer
.iter()
.zip(self.element_grid.iter_mut())
{
if element_cell.character != chargrid_cell.character {
element_cell.character = chargrid_cell.character;
let string = match chargrid_cell.character {
' ' => " ".to_string(),
other => other.to_string(),
};
element_cell.element.set_inner_html(&string);
}
let element_style = element_cell.element.style();
if element_cell.foreground != chargrid_cell.foreground {
element_cell.foreground = chargrid_cell.foreground;
element_style
.set_property("color", &rgba32_to_web_colour(chargrid_cell.foreground))
.unwrap();
}
if element_cell.background != chargrid_cell.background {
element_cell.background = chargrid_cell.background;
element_style
.set_property(
"background-color",
&rgba32_to_web_colour(chargrid_cell.background),
)
.unwrap();
}
if element_cell.underline != chargrid_cell.underline {
element_cell.underline = chargrid_cell.underline;
if chargrid_cell.underline {
element_style
.set_property("text-decoration", "underline")
.unwrap();
} else {
element_style.remove_property("text-decoration").unwrap();
}
}
if element_cell.bold != chargrid_cell.bold {
element_cell.bold = chargrid_cell.bold;
if chargrid_cell.bold {
element_style.set_property("font-weight", "bold").unwrap();
} else {
element_style.remove_property("font-weight").unwrap();
}
}
}
}
pub fn run<C>(self, component: C)
where
C: 'static + Component<State = (), Output = app::Output>,
{
let component = Rc::new(RefCell::new(component));
let context = Rc::new(RefCell::new(self));
run_frame(component.clone(), context.clone());
run_input(component, context);
}
}
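// Usage sketch (the element id and component name are illustrative): any type
// implementing `Component<State = (), Output = app::Output>` can be run
// against a container element holding the cell grid:
//
//     let context = Context::new(Size::new(80, 40), "chargrid-container");
//     context.run(my_component);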
fn run_frame<C>(component: Rc<RefCell<C>>, context: Rc<RefCell<Context>>)
where
C: 'static + Component<State = (), Output = app::Output>,
{
let window = web_sys::window().unwrap();
let performance = window.performance().unwrap();
let f: Rc<RefCell<Option<Closure<_>>>> = Rc::new(RefCell::new(None));
let g = f.clone();
let mut last_frame_time_stamp = performance.now();
*g.borrow_mut() = Some(Closure::wrap(Box::new(move || {
let frame_time_stamp = performance.now();
let since_last_frame = frame_time_stamp - last_frame_time_stamp;
last_frame_time_stamp = frame_time_stamp;
let mut context = context.borrow_mut();
on_frame(
&mut *component.borrow_mut(),
Duration::from_millis(since_last_frame as u64),
&mut context.chargrid_frame_buffer,
);
context.render_internal();
window
.request_animation_frame(f.borrow().as_ref().unwrap().as_ref().unchecked_ref())
.unwrap();
}) as Box<dyn FnMut()>));
g.borrow()
.as_ref()
.unwrap()
.as_ref()
.unchecked_ref::<Function>()
.call0(&JsValue::NULL)
.unwrap();
}
mod buttons {
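    // Predicates over the `MouseEvent.buttons` bitmask (bit 0 = left,
    // bit 1 = right, bit 2 = middle/auxiliary, per the DOM spec).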
pub fn has_left(buttons: u16) -> bool {
buttons & 1 != 0
}
pub fn has_right(buttons: u16) -> bool {
buttons & 2 != 0
}
pub fn has_middle(buttons: u16) -> bool {
buttons & 4 != 0
}
pub fn has_none(buttons: u16) -> bool {
buttons == 0
}
}
mod button {
use chargrid_input::MouseButton;
const LEFT: i16 = 0;
const MIDDLE: i16 = 1;
const RIGHT: i16 = 2;
pub fn to_mouse_button(button: i16) -> Option<MouseButton> |
}
fn run_input<C>(component: Rc<RefCell<C>>, context: Rc<RefCell<Context>>)
where
C: 'static + Component<State = (), Output = app::Output>,
{
let window = web_sys::window().unwrap();
let handle_keydown = {
let component = component.clone();
let context = context.clone();
Closure::wrap(Box::new(move |event: JsValue| {
let keyboard_event = event.unchecked_ref::<KeyboardEvent>();
if let Some(input) = input::from_js_event_key_press(
keyboard_event.key_code() as u8,
keyboard_event.shift_key(),
) {
on_input(
&mut *component.borrow_mut(),
input,
&context.borrow().chargrid_frame_buffer,
);
}
}) as Box<dyn FnMut(JsValue)>)
};
let handle_mouse_move = {
let component = component.clone();
let context = context.clone();
Closure::wrap(Box::new(move |event: JsValue| {
let mut component = component.borrow_mut();
#[cfg(feature = "gamepad")]
let mut context = context.borrow_mut();
#[cfg(not(feature = "gamepad"))]
let context = context.borrow_mut();
let element_display_info = context.element_display_info();
let mouse_event = event.unchecked_ref::<MouseEvent>();
let coord =
element_display_info.mouse_coord(mouse_event.client_x(), mouse_event.client_y());
let buttons = mouse_event.buttons();
if buttons::has_none(buttons) {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseMove {
button: None,
coord,
}),
&context.chargrid_frame_buffer,
);
}
if buttons::has_left(buttons) {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseMove {
button: Some(MouseButton::Left),
coord,
}),
&context.chargrid_frame_buffer,
);
}
if buttons::has_right(buttons) {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseMove {
button: Some(MouseButton::Right),
coord,
}),
&context.chargrid_frame_buffer,
);
}
if buttons::has_middle(buttons) {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseMove {
button: Some(MouseButton::Middle),
coord,
}),
&context.chargrid_frame_buffer,
);
}
#[cfg(feature = "gamepad")]
for input in context.gamepad.drain_input().collect::<Vec<_>>() {
on_input(
&mut *component,
chargrid_input::Input::Gamepad(input),
&context.chargrid_frame_buffer,
);
}
}) as Box<dyn FnMut(JsValue)>)
};
let handle_mouse_down = {
let component = component.clone();
let context = context.clone();
Closure::wrap(Box::new(move |event: JsValue| {
let mut component = component.borrow_mut();
let context = context.borrow_mut();
let element_display_info = context.element_display_info();
let mouse_event = event.unchecked_ref::<MouseEvent>();
let coord =
element_display_info.mouse_coord(mouse_event.client_x(), mouse_event.client_y());
let button = mouse_event.button();
if let Some(button) = button::to_mouse_button(button) {
on_input(
&mut *component,
Input::Mouse(MouseInput::MousePress { button, coord }),
&context.chargrid_frame_buffer,
);
}
}) as Box<dyn FnMut(JsValue)>)
};
let handle_mouse_up = {
let component = component.clone();
let context = context.clone();
Closure::wrap(Box::new(move |event: JsValue| {
let mut component = component.borrow_mut();
let context = context.borrow_mut();
let element_display_info = context.element_display_info();
let mouse_event = event.unchecked_ref::<MouseEvent>();
let coord =
element_display_info.mouse_coord(mouse_event.client_x(), mouse_event.client_y());
let button = mouse_event.button();
if let Some(button) = button::to_mouse_button(button) {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseRelease {
button: Ok(button),
coord,
}),
&context.chargrid_frame_buffer,
);
}
}) as Box<dyn FnMut(JsValue)>)
};
let handle_wheel = Closure::wrap(Box::new(move |event: JsValue| {
let context = context.borrow_mut();
let mut component = component.borrow_mut();
let element_display_info = context.element_display_info();
let wheel_event = event.unchecked_ref::<WheelEvent>();
let coord =
element_display_info.mouse_coord(wheel_event.client_x(), wheel_event.client_y());
if wheel_event.delta_x() < 0. {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseScroll {
direction: ScrollDirection::Left,
coord,
}),
&context.chargrid_frame_buffer,
);
} else if wheel_event.delta_x() > 0. {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseScroll {
direction: ScrollDirection::Right,
coord,
}),
&context.chargrid_frame_buffer,
);
}
if wheel_event.delta_y() < 0. {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseScroll {
direction: ScrollDirection::Up,
coord,
}),
&context.chargrid_frame_buffer,
);
} else if wheel_event.delta_y() > 0. {
on_input(
&mut *component,
Input::Mouse(MouseInput::MouseScroll {
direction: ScrollDirection::Down,
coord,
}),
&context.chargrid_frame_buffer,
);
}
}) as Box<dyn FnMut(JsValue)>);
window
.add_event_listener_with_callback("keydown", handle_keydown.as_ref().unchecked_ref())
.unwrap();
window
.add_event_listener_with_callback("mousemove", handle_mouse_move.as_ref().unchecked_ref())
.unwrap();
window
.add_event_listener_with_callback("mousedown", handle_mouse_down.as_ref().unchecked_ref())
.unwrap();
window
.add_event_listener_with_callback("mouseup", handle_mouse_up.as_ref().unchecked_ref())
.unwrap();
window
.add_event_listener_with_callback("wheel", handle_wheel.as_ref().unchecked_ref())
.unwrap();
handle_keydown.forget();
handle_mouse_move.forget();
handle_mouse_down.forget();
handle_mouse_up.forget();
handle_wheel.forget();
}
| {
match button {
LEFT => Some(MouseButton::Left),
MIDDLE => Some(MouseButton::Middle),
RIGHT => Some(MouseButton::Right),
_ => None,
}
} |
config.py | """Cookiecutter loader."""
import json
from collections.abc import Iterator
from typing import Any
from cutty.filesystems.domain.path import Path
from cutty.templates.domain.config import Config
from cutty.templates.domain.variables import Variable
def loadvalue(value: Any) -> Any:
"""Stringize scalars."""
if isinstance(value, (bool, int, float)):
return str(value)
if isinstance(value, (str, dict)): | return value
raise RuntimeError(f"unsupported value type {type(value)}") # pragma: no cover
def loadvariable(name: str, value: Any) -> Variable:
"""Load a variable."""
if isinstance(value, list):
choices = tuple(loadvalue(choice) for choice in value)
[valuetype] = set(type(choice) for choice in choices)
return Variable(
name=name,
description=name,
type=valuetype,
default=choices[0],
choices=choices,
interactive=True,
)
value = loadvalue(value)
return Variable(
name=name,
description=name,
type=type(value),
default=value,
choices=(),
interactive=True,
)
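# Example: loadvariable("license", ["MIT", "Apache-2.0"]) yields a Variable of
# type str whose default is "MIT"; the first list entry doubles as the default
# and the full list becomes the ordered choices.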
def loadcookiecutterconfig(template: str, path: Path) -> Config:
"""Load the configuration for a Cookiecutter template."""
text = (path / "cookiecutter.json").read_text()
data = json.loads(text)
assert isinstance(data, dict) and all( # noqa: S101
isinstance(name, str) for name in data
)
data.setdefault("_template", template)
settings = {name: value for name, value in data.items() if name.startswith("_")}
variables = tuple(
loadvariable(name, value)
for name, value in data.items()
if not name.startswith("_")
)
return Config(settings, variables)
def findcookiecutterpaths(path: Path, config: Config) -> Iterator[Path]:
"""Load project files in a Cookiecutter template."""
for template_dir in path.iterdir():
if all(token in template_dir.name for token in ("{{", "cookiecutter", "}}")):
break
else:
raise RuntimeError("template directory not found") # pragma: no cover
yield template_dir
def findcookiecutterhooks(path: Path) -> Iterator[Path]:
"""Load hooks in a Cookiecutter template."""
hooks = {"pre_gen_project", "post_gen_project"}
hookdir = path / "hooks"
if hookdir.is_dir():
for path in hookdir.iterdir():
if path.is_file() and not path.name.endswith("~") and path.stem in hooks:
yield path | |
api_op_CreateAddressBook.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package alexaforbusiness
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/awslabs/smithy-go/middleware"
smithyhttp "github.com/awslabs/smithy-go/transport/http"
)
// Creates an address book with the specified details.
func (c *Client) CreateAddressBook(ctx context.Context, params *CreateAddressBookInput, optFns ...func(*Options)) (*CreateAddressBookOutput, error) {
if params == nil {
params = &CreateAddressBookInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateAddressBook", params, optFns, addOperationCreateAddressBookMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateAddressBookOutput)
out.ResultMetadata = metadata
return out, nil
}
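// Usage sketch (illustrative; client construction and error handling elided,
// `aws.String` being the usual pointer helper from the aws package):
//
//	out, err := client.CreateAddressBook(ctx, &CreateAddressBookInput{
//		Name:        aws.String("Front desk"),
//		Description: aws.String("Lobby devices"),
//	})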
type CreateAddressBookInput struct {
// The name of the address book.
//
// This member is required.
Name *string
// A unique, user-specified identifier for the request that ensures idempotency.
ClientRequestToken *string
// The description of the address book.
Description *string
}
type CreateAddressBookOutput struct {
// The ARN of the newly created address book.
AddressBookArn *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
}
func | (stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsAwsjson11_serializeOpCreateAddressBook{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsAwsjson11_deserializeOpCreateAddressBook{}, middleware.After)
if err != nil {
return err
}
awsmiddleware.AddRequestInvocationIDMiddleware(stack)
smithyhttp.AddContentLengthMiddleware(stack)
addResolveEndpointMiddleware(stack, options)
v4.AddComputePayloadSHA256Middleware(stack)
addRetryMiddlewares(stack, options)
addHTTPSignerV4Middleware(stack, options)
awsmiddleware.AddAttemptClockSkewMiddleware(stack)
addClientUserAgent(stack)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
smithyhttp.AddCloseResponseBodyMiddleware(stack)
addIdempotencyToken_opCreateAddressBookMiddleware(stack, options)
addOpCreateAddressBookValidationMiddleware(stack)
stack.Initialize.Add(newServiceMetadataMiddleware_opCreateAddressBook(options.Region), middleware.Before)
addRequestIDRetrieverMiddleware(stack)
addResponseErrorMiddleware(stack)
return nil
}
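// idempotencyToken_initializeOpCreateAddressBook auto-fills ClientRequestToken
// from the configured IdempotencyTokenProvider when the caller leaves it nil,
// so retried CreateAddressBook requests stay idempotent.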
type idempotencyToken_initializeOpCreateAddressBook struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpCreateAddressBook) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpCreateAddressBook) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*CreateAddressBookInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateAddressBookInput ")
}
if input.ClientRequestToken == nil {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientRequestToken = &t
}
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opCreateAddressBookMiddleware(stack *middleware.Stack, cfg Options) {
stack.Initialize.Add(&idempotencyToken_initializeOpCreateAddressBook{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opCreateAddressBook(region string) awsmiddleware.RegisterServiceMetadata {
return awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "a4b",
OperationName: "CreateAddressBook",
}
}
| addOperationCreateAddressBookMiddlewares |
explorerroutes.go | // Copyright (c) 2018-2019, The Decred developers
// Copyright (c) 2017, The dcrdata developers
// See LICENSE for details.
package explorer
import (
"database/sql"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"math"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/decred/dcrd/chaincfg/chainhash"
"github.com/decred/dcrd/chaincfg/v2"
"github.com/decred/dcrd/dcrutil/v2"
chainjson "github.com/decred/dcrd/rpc/jsonrpc/types/v2"
"github.com/decred/dcrd/txscript/v2"
"github.com/decred/dcrdata/db/dbtypes/v2"
"github.com/decred/dcrdata/exchanges/v2"
"github.com/decred/dcrdata/explorer/types/v2"
"github.com/decred/dcrdata/gov/v3/agendas"
pitypes "github.com/decred/dcrdata/gov/v3/politeia/types"
"github.com/decred/dcrdata/txhelpers/v4"
humanize "github.com/dustin/go-humanize"
)
var dummyRequest = new(http.Request)
func init() {
	// URL should be set because commonData calls a method on it.
dummyRequest.URL, _ = url.Parse("/")
}
// Cookies contains information from the request cookies.
type Cookies struct {
DarkMode bool
}
// CommonPageData is the basis for data structs used for HTML templates.
// explorerUI.commonData returns an initialized instance or CommonPageData,
// which itself should be used to initialize page data template structs.
type CommonPageData struct {
Tip *types.WebBasicBlock
Version string
ChainParams *chaincfg.Params
BlockTimeUnix int64
DevAddress string
Links *links
NetName string
Cookies Cookies
RequestURI string
}
// Status page strings
const (
defaultErrorCode = "Something went wrong..."
defaultErrorMessage = "Try refreshing... it usually fixes things."
pageDisabledCode = "%s has been disabled for now."
wrongNetwork = "Wrong Network"
)
// expStatus defines the various status types supported by the system.
type expStatus string
// These are the explorer status messages used by the status page.
const (
ExpStatusError expStatus = "Error"
ExpStatusNotFound expStatus = "Not Found"
ExpStatusFutureBlock expStatus = "Future Block"
ExpStatusNotSupported expStatus = "Not Supported"
ExpStatusBadRequest expStatus = "Bad Request"
ExpStatusNotImplemented expStatus = "Not Implemented"
ExpStatusPageDisabled expStatus = "Page Disabled"
ExpStatusWrongNetwork expStatus = "Wrong Network"
ExpStatusDeprecated expStatus = "Deprecated"
ExpStatusSyncing expStatus = "Blocks Syncing"
ExpStatusDBTimeout expStatus = "Database Timeout"
ExpStatusP2PKAddress expStatus = "P2PK Address Type"
)
func (e expStatus) IsNotFound() bool {
return e == ExpStatusNotFound
}
func (e expStatus) IsWrongNet() bool {
return e == ExpStatusWrongNetwork
}
func (e expStatus) IsP2PKAddress() bool {
return e == ExpStatusP2PKAddress
}
func (e expStatus) IsFutureBlock() bool {
return e == ExpStatusFutureBlock
}
func (e expStatus) IsSyncing() bool {
return e == ExpStatusSyncing
}
// number of blocks displayed on /visualblocks
const homePageBlocksMaxCount = 30
// netName returns the name used when referring to a Decred network.
func netName(chainParams *chaincfg.Params) string |
func (exp *explorerUI) timeoutErrorPage(w http.ResponseWriter, err error, debugStr string) (wasTimeout bool) {
wasTimeout = dbtypes.IsTimeoutErr(err)
if wasTimeout {
log.Debugf("%s: %v", debugStr, err)
exp.StatusPage(w, defaultErrorCode,
"Database timeout. Please try again later.", "", ExpStatusDBTimeout)
}
return
}
// For the exchange rates on the homepage
type homeConversions struct {
ExchangeRate *exchanges.Conversion
StakeDiff *exchanges.Conversion
CoinSupply *exchanges.Conversion
PowSplit *exchanges.Conversion
TreasurySplit *exchanges.Conversion
TreasuryBalance *exchanges.Conversion
}
// Home is the page handler for the "/" path.
func (exp *explorerUI) Home(w http.ResponseWriter, r *http.Request) {
height, err := exp.dataSource.GetHeight()
if err != nil {
log.Errorf("GetHeight failed: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "",
ExpStatusError)
return
}
blocks := exp.dataSource.GetExplorerBlocks(int(height), int(height)-8)
var bestBlock *types.BlockBasic
if len(blocks) == 0 {
bestBlock = new(types.BlockBasic)
} else {
bestBlock = blocks[0]
}
// Safely retrieve the current inventory pointer.
inv := exp.MempoolInventory()
// Lock the shared inventory struct from change (e.g. in MempoolMonitor).
inv.RLock()
exp.pageData.RLock()
tallys, consensus := inv.VotingInfo.BlockStatus(bestBlock.Hash)
// Get fiat conversions if available
homeInfo := exp.pageData.HomeInfo
var conversions *homeConversions
xcBot := exp.xcBot
if xcBot != nil {
conversions = &homeConversions{
ExchangeRate: xcBot.Conversion(1.0),
StakeDiff: xcBot.Conversion(homeInfo.StakeDiff),
CoinSupply: xcBot.Conversion(dcrutil.Amount(homeInfo.CoinSupply).ToCoin()),
PowSplit: xcBot.Conversion(dcrutil.Amount(homeInfo.NBlockSubsidy.PoW).ToCoin()),
TreasurySplit: xcBot.Conversion(dcrutil.Amount(homeInfo.NBlockSubsidy.Dev).ToCoin()),
TreasuryBalance: xcBot.Conversion(dcrutil.Amount(homeInfo.DevFund).ToCoin()),
}
}
str, err := exp.templates.exec("home", struct {
*CommonPageData
Info *types.HomeInfo
Mempool *types.MempoolInfo
BestBlock *types.BlockBasic
BlockTally []int
Consensus int
Blocks []*types.BlockBasic
Conversions *homeConversions
PercentChange float64
}{
CommonPageData: exp.commonData(r),
Info: homeInfo,
Mempool: inv,
BestBlock: bestBlock,
BlockTally: tallys,
Consensus: consensus,
Blocks: blocks,
Conversions: conversions,
PercentChange: homeInfo.PoolInfo.PercentTarget - 100,
})
inv.RUnlock()
exp.pageData.RUnlock()
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// SideChains is the page handler for the "/side" path.
func (exp *explorerUI) SideChains(w http.ResponseWriter, r *http.Request) {
sideBlocks, err := exp.dataSource.SideChainBlocks()
if exp.timeoutErrorPage(w, err, "SideChainBlocks") {
return
}
if err != nil {
log.Errorf("Unable to get side chain blocks: %v", err)
exp.StatusPage(w, defaultErrorCode,
"failed to retrieve side chain blocks", "", ExpStatusError)
return
}
str, err := exp.templates.exec("sidechains", struct {
*CommonPageData
Data []*dbtypes.BlockStatus
}{
CommonPageData: exp.commonData(r),
Data: sideBlocks,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// InsightRootPage is the page for the "/insight" path.
func (exp *explorerUI) InsightRootPage(w http.ResponseWriter, r *http.Request) {
str, err := exp.templates.exec("insight_root", struct {
*CommonPageData
}{
CommonPageData: exp.commonData(r),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// DisapprovedBlocks is the page handler for the "/disapproved" path.
func (exp *explorerUI) DisapprovedBlocks(w http.ResponseWriter, r *http.Request) {
disapprovedBlocks, err := exp.dataSource.DisapprovedBlocks()
if exp.timeoutErrorPage(w, err, "DisapprovedBlocks") {
return
}
if err != nil {
log.Errorf("Unable to get stakeholder disapproved blocks: %v", err)
exp.StatusPage(w, defaultErrorCode,
"failed to retrieve stakeholder disapproved blocks", "", ExpStatusError)
return
}
str, err := exp.templates.exec("disapproved", struct {
*CommonPageData
Data []*dbtypes.BlockStatus
}{
CommonPageData: exp.commonData(r),
Data: disapprovedBlocks,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// VisualBlocks is the page handler for the "/visualblocks" path.
func (exp *explorerUI) VisualBlocks(w http.ResponseWriter, r *http.Request) {
// Get top N blocks and trim each block to have just the fields required for
// this page.
height, err := exp.dataSource.GetHeight()
if err != nil {
log.Errorf("GetHeight failed: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "",
ExpStatusError)
return
}
blocks := exp.dataSource.GetExplorerFullBlocks(int(height),
int(height)-homePageBlocksMaxCount)
// Trim unwanted data in each block.
trimmedBlocks := make([]*types.TrimmedBlockInfo, 0, len(blocks))
for _, block := range blocks {
trimmedBlock := &types.TrimmedBlockInfo{
Time: block.BlockTime,
Height: block.Height,
Total: block.TotalSent,
Fees: block.MiningFee,
Subsidy: block.Subsidy,
Votes: block.Votes,
Tickets: block.Tickets,
Revocations: block.Revs,
Transactions: types.FilterRegularTx(block.Tx),
}
trimmedBlocks = append(trimmedBlocks, trimmedBlock)
}
// Construct the required TrimmedMempoolInfo from the shared inventory.
inv := exp.MempoolInventory()
mempoolInfo := inv.Trim() // Trim internally locks the MempoolInfo.
exp.pageData.RLock()
mempoolInfo.Subsidy = exp.pageData.HomeInfo.NBlockSubsidy
str, err := exp.templates.exec("visualblocks", struct {
*CommonPageData
Info *types.HomeInfo
Mempool *types.TrimmedMempoolInfo
Blocks []*types.TrimmedBlockInfo
}{
CommonPageData: exp.commonData(r),
Info: exp.pageData.HomeInfo,
Mempool: mempoolInfo,
Blocks: trimmedBlocks,
})
exp.pageData.RUnlock()
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// StakeDiffWindows is the page handler for the "/ticketpricewindows" path.
func (exp *explorerUI) StakeDiffWindows(w http.ResponseWriter, r *http.Request) {
var offsetWindow uint64
if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" {
o, err := strconv.ParseUint(offsetStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
offsetWindow = o
}
var rows uint64
if rowsStr := r.URL.Query().Get("rows"); rowsStr != "" {
o, err := strconv.ParseUint(rowsStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
rows = o
}
bestWindow := uint64(exp.Height() / exp.ChainParams.StakeDiffWindowSize)
if offsetWindow > bestWindow {
offsetWindow = bestWindow
}
if rows == 0 {
rows = minExplorerRows
} else if rows > maxExplorerRows {
rows = maxExplorerRows
}
windows, err := exp.dataSource.PosIntervals(rows, offsetWindow)
if exp.timeoutErrorPage(w, err, "PosIntervals") {
return
}
if err != nil {
log.Errorf("The specified windows are invalid. offset=%d&rows=%d: "+
"error: %v ", offsetWindow, rows, err)
exp.StatusPage(w, defaultErrorCode,
"The specified ticket price windows could not be found", "", ExpStatusNotFound)
return
}
linkTemplate := "/ticketpricewindows?offset=%d&rows=" + strconv.FormatUint(rows, 10)
str, err := exp.templates.exec("windows", struct {
*CommonPageData
Data []*dbtypes.BlocksGroupedInfo
WindowSize int64
BestWindow int64
OffsetWindow int64
Limit int64
TimeGrouping string
Pages pageNumbers
}{
CommonPageData: exp.commonData(r),
Data: windows,
WindowSize: exp.ChainParams.StakeDiffWindowSize,
BestWindow: int64(bestWindow),
OffsetWindow: int64(offsetWindow),
Limit: int64(rows),
TimeGrouping: "Windows",
Pages: calcPages(int(bestWindow), int(rows), int(offsetWindow), linkTemplate),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// DayBlocksListing handles "/day" page.
func (exp *explorerUI) DayBlocksListing(w http.ResponseWriter, r *http.Request) {
exp.timeBasedBlocksListing("Days", w, r)
}
// WeekBlocksListing handles "/week" page.
func (exp *explorerUI) WeekBlocksListing(w http.ResponseWriter, r *http.Request) {
exp.timeBasedBlocksListing("Weeks", w, r)
}
// MonthBlocksListing handles "/month" page.
func (exp *explorerUI) MonthBlocksListing(w http.ResponseWriter, r *http.Request) {
exp.timeBasedBlocksListing("Months", w, r)
}
// YearBlocksListing handles "/year" page.
func (exp *explorerUI) YearBlocksListing(w http.ResponseWriter, r *http.Request) {
exp.timeBasedBlocksListing("Years", w, r)
}
// timeBasedBlocksListing is the main handler for "/day", "/week", "/month"
// and "/year".
func (exp *explorerUI) timeBasedBlocksListing(val string, w http.ResponseWriter, r *http.Request) {
var offset uint64
if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" {
o, err := strconv.ParseUint(offsetStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
offset = o
}
var rows uint64
if rowsStr := r.URL.Query().Get("rows"); rowsStr != "" {
o, err := strconv.ParseUint(rowsStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
rows = o
}
grouping := dbtypes.TimeGroupingFromStr(val)
i, err := dbtypes.TimeBasedGroupingToInterval(grouping)
if err != nil {
// Default to year grouping if the given grouping is invalid.
i, err = dbtypes.TimeBasedGroupingToInterval(dbtypes.YearGrouping)
if err != nil {
exp.StatusPage(w, defaultErrorCode, "Invalid year grouping found.", "",
ExpStatusError)
log.Errorf("Invalid year grouping found: error: %v ", err)
return
}
grouping = dbtypes.YearGrouping
}
oldestBlockTime := exp.ChainParams.GenesisBlock.Header.Timestamp.Unix()
maxOffset := (time.Now().Unix() - oldestBlockTime) / int64(i)
m := uint64(maxOffset)
if offset > m {
offset = m
}
oldestBlockTimestamp := exp.ChainParams.GenesisBlock.Header.Timestamp
oldestBlockMonth := oldestBlockTimestamp.Month()
oldestBlockDay := oldestBlockTimestamp.Day()
now := time.Now()
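// The first interval is partial: if the current date has not yet reached the
// genesis block's anniversary within the active grouping (its month for year
// grouping, its day of month for month grouping), count one extra interval.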
if (grouping == dbtypes.YearGrouping && now.Month() < oldestBlockMonth) ||
(grouping == dbtypes.MonthGrouping && now.Day() < oldestBlockDay) ||
(grouping == dbtypes.YearGrouping && now.Month() == oldestBlockMonth && now.Day() < oldestBlockDay) {
maxOffset++
}
if rows == 0 {
rows = minExplorerRows
} else if rows > maxExplorerRows {
rows = maxExplorerRows
}
data, err := exp.dataSource.TimeBasedIntervals(grouping, rows, offset)
if exp.timeoutErrorPage(w, err, "TimeBasedIntervals") {
return
}
if err != nil {
log.Errorf("The specified /%s intervals are invalid. offset=%d&rows=%d: "+
"error: %v ", val, offset, rows, err)
exp.StatusPage(w, defaultErrorCode,
"The specified block intervals could be not found", "", ExpStatusNotFound)
return
}
// If the view is "years" and the top row is this year, modify the formatted
// time string to indicate it is a partial result.
if val == "Years" && len(data) > 0 && data[0].EndTime.T.Year() == time.Now().Year() {
data[0].FormattedStartTime = fmt.Sprintf("%s YTD", time.Now().Format("2006"))
}
linkTemplate := "/" + strings.ToLower(val) + "?offset=%d&rows=" + strconv.FormatUint(rows, 10)
str, err := exp.templates.exec("timelisting", struct {
*CommonPageData
Data []*dbtypes.BlocksGroupedInfo
TimeGrouping string
Offset int64
Limit int64
BestGrouping int64
Pages pageNumbers
}{
CommonPageData: exp.commonData(r),
Data: data,
TimeGrouping: val,
Offset: int64(offset),
Limit: int64(rows),
BestGrouping: maxOffset,
Pages: calcPages(int(maxOffset), int(rows), int(offset), linkTemplate),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// Blocks is the page handler for the "/blocks" path.
func (exp *explorerUI) Blocks(w http.ResponseWriter, r *http.Request) {
bestBlockHeight, err := exp.dataSource.GetHeight()
if err != nil {
log.Errorf("GetHeight failed: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "",
ExpStatusError)
return
}
var height int64
if heightStr := r.URL.Query().Get("height"); heightStr != "" {
h, err := strconv.ParseUint(heightStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
height = int64(h)
} else {
height = bestBlockHeight
}
var rows int64
if rowsStr := r.URL.Query().Get("rows"); rowsStr != "" {
h, err := strconv.ParseUint(rowsStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
rows = int64(h)
}
if height > bestBlockHeight {
height = bestBlockHeight
}
if rows == 0 {
rows = minExplorerRows
} else if rows > maxExplorerRows {
rows = maxExplorerRows
}
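// end is the exclusive lower height bound passed to GetExplorerBlocks;
// clamping it to -1 lets the final page include the genesis block.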
var end int
oldestBlock := height - rows + 1
if oldestBlock < 0 {
end = -1
} else {
end = int(height - rows)
}
summaries := exp.dataSource.GetExplorerBlocks(int(height), end)
if summaries == nil {
log.Errorf("Unable to get blocks: height=%d&rows=%d", height, rows)
exp.StatusPage(w, defaultErrorCode, "could not find those blocks", "",
ExpStatusNotFound)
return
}
for _, s := range summaries {
blockStatus, err := exp.dataSource.BlockStatus(s.Hash)
if exp.timeoutErrorPage(w, err, "BlockStatus") {
return
}
if err != nil && err != sql.ErrNoRows {
log.Warnf("Unable to retrieve chain status for block %s: %v",
s.Hash, err)
}
s.Valid = blockStatus.IsValid
s.MainChain = blockStatus.IsMainchain
}
linkTemplate := "/blocks?height=%d&rows=" + strconv.FormatInt(rows, 10)
oldestHeight := bestBlockHeight % rows
str, err := exp.templates.exec("explorer", struct {
*CommonPageData
Data []*types.BlockBasic
BestBlock int64
OldestHeight int64
Rows int64
RowsCount int64
WindowSize int64
TimeGrouping string
Pages pageNumbers
}{
CommonPageData: exp.commonData(r),
Data: summaries,
BestBlock: bestBlockHeight,
OldestHeight: oldestHeight,
Rows: rows,
RowsCount: int64(len(summaries)),
WindowSize: exp.ChainParams.StakeDiffWindowSize,
TimeGrouping: "Blocks",
Pages: calcPagesDesc(int(bestBlockHeight), int(rows), int(height), linkTemplate),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// Block is the page handler for the "/block" path.
func (exp *explorerUI) Block(w http.ResponseWriter, r *http.Request) {
// Retrieve the block specified on the path.
hash := getBlockHashCtx(r)
data := exp.dataSource.GetExplorerBlock(hash)
if data == nil {
log.Errorf("Unable to get block %s", hash)
exp.StatusPage(w, defaultErrorCode, "could not find that block", "",
ExpStatusNotFound)
return
}
// Check if there are any regular non-coinbase transactions in the block.
var count int
data.TxAvailable = true
for _, i := range data.Tx {
if i.Coinbase {
count++
}
}
if count == len(data.Tx) {
data.TxAvailable = false
}
// Retrieve missed votes, main/side chain status, and stakeholder approval.
var err error
data.Misses, err = exp.dataSource.BlockMissedVotes(hash)
if exp.timeoutErrorPage(w, err, "BlockMissedVotes") {
return
}
if err != nil && err != sql.ErrNoRows {
log.Warnf("Unable to retrieve missed votes for block %s: %v", hash, err)
}
var blockStatus dbtypes.BlockStatus
blockStatus, err = exp.dataSource.BlockStatus(hash)
if exp.timeoutErrorPage(w, err, "BlockStatus") {
return
}
if err != nil && err != sql.ErrNoRows {
log.Warnf("Unable to retrieve chain status for block %s: %v", hash, err)
}
data.Valid = blockStatus.IsValid
data.MainChain = blockStatus.IsMainchain
pageData := struct {
*CommonPageData
Data *types.BlockInfo
FiatConversion *exchanges.Conversion
}{
CommonPageData: exp.commonData(r),
Data: data,
}
if exp.xcBot != nil && time.Since(data.BlockTime.T) < time.Hour {
pageData.FiatConversion = exp.xcBot.Conversion(data.TotalSent)
}
str, err := exp.templates.exec("block", pageData)
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Turbolinks-Location", r.URL.RequestURI())
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// Mempool is the page handler for the "/mempool" path.
func (exp *explorerUI) Mempool(w http.ResponseWriter, r *http.Request) {
// Safely retrieve the inventory pointer, which can be reset in StoreMPData.
inv := exp.MempoolInventory()
// Prevent modifications to the shared inventory struct (e.g. in the
// MempoolMonitor) while marshaling the inventory.
inv.RLock()
str, err := exp.templates.exec("mempool", struct {
*CommonPageData
Mempool *types.MempoolInfo
}{
CommonPageData: exp.commonData(r),
Mempool: inv,
})
inv.RUnlock()
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// Ticketpool is the page handler for the "/ticketpool" path.
func (exp *explorerUI) Ticketpool(w http.ResponseWriter, r *http.Request) {
str, err := exp.templates.exec("ticketpool", exp.commonData(r))
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// TxPage is the page handler for the "/tx" path.
func (exp *explorerUI) TxPage(w http.ResponseWriter, r *http.Request) {
// Attempt to get the transaction hash string from the URL path.
hash, ok := r.Context().Value(ctxTxHash).(string)
if !ok {
log.Trace("txid not set")
exp.StatusPage(w, defaultErrorCode, "there was no transaction requested",
"", ExpStatusNotFound)
return
}
inout, _ := r.Context().Value(ctxTxInOut).(string)
if inout != "in" && inout != "out" && inout != "" {
exp.StatusPage(w, defaultErrorCode, "there was no transaction requested",
"", ExpStatusNotFound)
return
}
ioid, _ := r.Context().Value(ctxTxInOutId).(string)
inoutid, _ := strconv.ParseInt(ioid, 10, 0)
tx := exp.dataSource.GetExplorerTx(hash)
// If dcrd has no information about the transaction, pull the transaction
// details from the auxiliary database.
if tx == nil {
// Search for occurrences of the transaction in the database.
dbTxs, err := exp.dataSource.Transaction(hash)
if exp.timeoutErrorPage(w, err, "Transaction") {
return
}
if err != nil {
log.Errorf("Unable to retrieve transaction details for %s.", hash)
exp.StatusPage(w, defaultErrorCode, "could not find that transaction",
"", ExpStatusNotFound)
return
}
if dbTxs == nil {
exp.StatusPage(w, defaultErrorCode, "that transaction has not been recorded",
"", ExpStatusNotFound)
return
}
// Take the first one. The query order should put valid at the top of
// the list. Regardless of order, the transaction web page will link to
// all occurrences of the transaction.
dbTx0 := dbTxs[0]
fees := dcrutil.Amount(dbTx0.Fees)
tx = &types.TxInfo{
TxBasic: &types.TxBasic{
TxID: hash,
FormattedSize: humanize.Bytes(uint64(dbTx0.Size)),
Total: dcrutil.Amount(dbTx0.Sent).ToCoin(),
Fee: fees,
FeeRate: dcrutil.Amount((1000 * int64(fees)) / int64(dbTx0.Size)),
// VoteInfo TODO - check votes table
Coinbase: dbTx0.BlockIndex == 0,
},
SpendingTxns: make([]types.TxInID, len(dbTx0.VoutDbIds)), // SpendingTxns filled below
Type: txhelpers.TxTypeToString(int(dbTx0.TxType)),
// Vins - looked-up in vins table
// Vouts - looked-up in vouts table
BlockHeight: dbTx0.BlockHeight,
BlockIndex: dbTx0.BlockIndex,
BlockHash: dbTx0.BlockHash,
Confirmations: exp.Height() - dbTx0.BlockHeight + 1,
Time: types.TimeDef(dbTx0.Time),
}
// Coinbase transactions are regular, but call them coinbase for the page.
if tx.Coinbase {
tx.Type = types.CoinbaseTypeStr
}
// Retrieve vouts from DB.
vouts, err := exp.dataSource.VoutsForTx(dbTx0)
if exp.timeoutErrorPage(w, err, "VoutsForTx") {
return
}
if err != nil {
log.Errorf("Failed to retrieve all vout details for transaction %s: %v",
dbTx0.TxID, err)
exp.StatusPage(w, defaultErrorCode, "VoutsForTx failed", "", ExpStatusError)
return
}
// Convert to explorer.Vout, getting spending information from DB.
for iv := range vouts {
// Check pkScript for OP_RETURN
var opReturn string
asm, _ := txscript.DisasmString(vouts[iv].ScriptPubKey)
if strings.Contains(asm, "OP_RETURN") {
opReturn = asm
}
// Determine if the outpoint is spent
spendingTx, _, _, err := exp.dataSource.SpendingTransaction(hash, vouts[iv].TxIndex)
if exp.timeoutErrorPage(w, err, "SpendingTransaction") {
return
}
if err != nil && err != sql.ErrNoRows {
log.Warnf("SpendingTransaction failed for outpoint %s:%d: %v",
hash, vouts[iv].TxIndex, err)
}
amount := dcrutil.Amount(int64(vouts[iv].Value)).ToCoin()
tx.Vout = append(tx.Vout, types.Vout{
Addresses: vouts[iv].ScriptPubKeyData.Addresses,
Amount: amount,
FormattedAmount: humanize.Commaf(amount),
Type: txhelpers.TxTypeToString(int(vouts[iv].TxType)),
Spent: spendingTx != "",
OP_RETURN: opReturn,
Index: vouts[iv].TxIndex,
})
}
// Retrieve vins from DB.
vins, prevPkScripts, scriptVersions, err := exp.dataSource.VinsForTx(dbTx0)
if exp.timeoutErrorPage(w, err, "VinsForTx") {
return
}
if err != nil {
log.Errorf("Failed to retrieve all vin details for transaction %s: %v",
dbTx0.TxID, err)
exp.StatusPage(w, defaultErrorCode, "VinsForTx failed", "", ExpStatusError)
return
}
// Convert to explorer.Vin from dbtypes.VinTxProperty.
for iv := range vins {
// Decode all addresses from previous outpoint's pkScript.
var addresses []string
pkScriptsStr, err := hex.DecodeString(prevPkScripts[iv])
if err != nil {
log.Errorf("Failed to decode pkgScript: %v", err)
}
_, scrAddrs, _, err := txscript.ExtractPkScriptAddrs(scriptVersions[iv],
pkScriptsStr, exp.ChainParams)
if err != nil {
log.Errorf("Failed to decode pkScript: %v", err)
} else {
for ia := range scrAddrs {
addresses = append(addresses, scrAddrs[ia].Address())
}
}
// If the scriptsig does not decode or disassemble, oh well.
asm, _ := txscript.DisasmString(vins[iv].ScriptHex)
txIndex := vins[iv].TxIndex
amount := dcrutil.Amount(vins[iv].ValueIn).ToCoin()
var coinbase, stakebase string
if txIndex == 0 {
if tx.Coinbase {
coinbase = hex.EncodeToString(txhelpers.CoinbaseScript)
} else if tx.IsVote() {
stakebase = hex.EncodeToString(txhelpers.CoinbaseScript)
}
}
tx.Vin = append(tx.Vin, types.Vin{
Vin: &chainjson.Vin{
Coinbase: coinbase,
Stakebase: stakebase,
Txid: hash,
Vout: vins[iv].PrevTxIndex,
Tree: dbTx0.Tree,
Sequence: vins[iv].Sequence,
AmountIn: amount,
BlockHeight: uint32(tx.BlockHeight),
BlockIndex: tx.BlockIndex,
ScriptSig: &chainjson.ScriptSig{
Asm: asm,
Hex: hex.EncodeToString(vins[iv].ScriptHex),
},
},
Addresses: addresses,
FormattedAmount: humanize.Commaf(amount),
Index: txIndex,
})
}
// For coinbase and stakebase, get maturity status.
if tx.Coinbase || tx.IsVote() || tx.IsRevocation() {
tx.Maturity = int64(exp.ChainParams.CoinbaseMaturity)
if tx.IsVote() {
tx.Maturity++ // TODO: why one more block for votes, as elsewhere?
}
if tx.Confirmations >= int64(exp.ChainParams.CoinbaseMaturity) {
tx.Mature = "True"
} else if tx.IsVote() {
tx.VoteFundsLocked = "True"
}
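// Estimate the hours remaining until maturity by linear interpolation:
// the full maturity time scaled by the fraction of confirmations still
// outstanding.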
coinbaseMaturityInHours :=
exp.ChainParams.TargetTimePerBlock.Hours() * float64(tx.Maturity)
tx.MaturityTimeTill = coinbaseMaturityInHours *
(1 - float64(tx.Confirmations)/float64(tx.Maturity))
}
// For ticket purchase, get status and maturity blocks, but compute
// details in normal code branch below.
if tx.IsTicket() {
tx.TicketInfo.TicketMaturity = int64(exp.ChainParams.TicketMaturity)
if tx.Confirmations >= tx.TicketInfo.TicketMaturity {
tx.Mature = "True"
}
}
} // tx == nil (not found by dcrd)
// Check for any transaction outputs that appear unspent.
unspents := types.UnspentOutputIndices(tx.Vout)
if len(unspents) > 0 {
// Grab the mempool transaction inputs that match this transaction.
mempoolVins := exp.GetTxMempoolInputs(hash, tx.Type)
if len(mempoolVins) > 0 {
// A quick matching function.
matchingVin := func(vout *types.Vout) (string, uint32) {
for vindex := range mempoolVins {
vin := mempoolVins[vindex]
for inIdx := range vin.Inputs {
input := vin.Inputs[inIdx]
if input.Outdex == vout.Index {
return vin.TxId, input.Index
}
}
}
return "", 0
}
for _, outdex := range unspents {
vout := &tx.Vout[outdex]
txid, vindex := matchingVin(vout)
if txid == "" {
continue
}
vout.Spent = true
tx.SpendingTxns[vout.Index] = types.TxInID{
Hash: txid,
Index: vindex,
}
}
}
}
// Set ticket-related parameters.
if tx.IsTicket() {
blocksLive := tx.Confirmations - int64(exp.ChainParams.TicketMaturity)
tx.TicketInfo.TicketPoolSize = int64(exp.ChainParams.TicketPoolSize) *
int64(exp.ChainParams.TicketsPerBlock)
tx.TicketInfo.TicketExpiry = int64(exp.ChainParams.TicketExpiry)
expirationInDays := (exp.ChainParams.TargetTimePerBlock.Hours() *
float64(exp.ChainParams.TicketExpiry)) / 24
maturityInHours := (exp.ChainParams.TargetTimePerBlock.Hours() *
float64(tx.TicketInfo.TicketMaturity))
tx.TicketInfo.TimeTillMaturity = ((float64(exp.ChainParams.TicketMaturity) -
float64(tx.Confirmations)) / float64(exp.ChainParams.TicketMaturity)) *
maturityInHours
ticketExpiryBlocksLeft := int64(exp.ChainParams.TicketExpiry) - blocksLive
tx.TicketInfo.TicketExpiryDaysLeft = (float64(ticketExpiryBlocksLeft) /
float64(exp.ChainParams.TicketExpiry)) * expirationInDays
}
// For any coinbase transactions look up the total block fees to include
// as part of the inputs.
if tx.Type == types.CoinbaseTypeStr {
data := exp.dataSource.GetExplorerBlock(tx.BlockHash)
if data == nil {
log.Errorf("Unable to get block %s", tx.BlockHash)
} else {
// BlockInfo.MiningFee is coin (float64), while
// TxInfo.BlockMiningFee is int64 (atoms), so convert. If the
// float64 is somehow invalid, use the default zero value.
feeAmt, _ := dcrutil.NewAmount(data.MiningFee)
tx.BlockMiningFee = int64(feeAmt)
}
}
// Details on all the blocks containing this transaction
blocks, blockInds, err := exp.dataSource.TransactionBlocks(tx.TxID)
if exp.timeoutErrorPage(w, err, "TransactionBlocks") {
return
}
if err != nil {
log.Errorf("Unable to retrieve blocks for transaction %s: %v",
hash, err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, tx.TxID, ExpStatusError)
return
}
// See if any of these blocks are mainchain and stakeholder-approved
// (a.k.a. valid).
var isConfirmedMainchain bool
for ib := range blocks {
if blocks[ib].IsValid && blocks[ib].IsMainchain {
isConfirmedMainchain = true
break
}
}
// For each output of this transaction, look up any spending transactions,
// and the index of the spending transaction input.
spendingTxHashes, spendingTxVinInds, voutInds, err :=
exp.dataSource.SpendingTransactions(hash)
if exp.timeoutErrorPage(w, err, "SpendingTransactions") {
return
}
if err != nil {
log.Errorf("Unable to retrieve spending transactions for %s: %v", hash, err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, hash, ExpStatusError)
return
}
for i, vout := range voutInds {
if int(vout) >= len(tx.SpendingTxns) {
log.Errorf("Invalid spending transaction data (%s:%d)", hash, vout)
continue
}
tx.SpendingTxns[vout] = types.TxInID{
Hash: spendingTxHashes[i],
Index: spendingTxVinInds[i],
}
}
if tx.IsTicket() {
spendStatus, poolStatus, err := exp.dataSource.PoolStatusForTicket(hash)
if exp.timeoutErrorPage(w, err, "PoolStatusForTicket") {
return
}
if err != nil && err != sql.ErrNoRows {
log.Errorf("Unable to retrieve ticket spend and pool status for %s: %v",
hash, err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
} else if err == sql.ErrNoRows {
if tx.Confirmations != 0 {
log.Warnf("Spend and pool status not found for ticket %s: %v", hash, err)
}
} else {
if tx.Mature == "False" {
tx.TicketInfo.PoolStatus = "immature"
} else {
tx.TicketInfo.PoolStatus = poolStatus.String()
}
tx.TicketInfo.SpendStatus = spendStatus.String()
// For missed tickets, get the block in which it should have voted.
if poolStatus == dbtypes.PoolStatusMissed {
tx.TicketInfo.LotteryBlock, _, err = exp.dataSource.TicketMiss(hash)
if err != nil && err != sql.ErrNoRows {
log.Errorf("Unable to retrieve miss information for ticket %s: %v",
hash, err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
} else if err == sql.ErrNoRows {
log.Warnf("No mainchain miss data for ticket %s: %v",
hash, err)
}
}
// Ticket luck and probability of voting.
// blocksLive < 0 for immature tickets
blocksLive := tx.Confirmations - int64(exp.ChainParams.TicketMaturity)
if tx.TicketInfo.SpendStatus == "Voted" {
// Blocks from eligible until voted (actual luck)
txhash, err := chainhash.NewHashFromStr(tx.SpendingTxns[0].Hash)
if err != nil {
exp.StatusPage(w, defaultErrorCode, err.Error(), "", ExpStatusError)
return
}
tx.TicketInfo.TicketLiveBlocks = exp.dataSource.TxHeight(txhash) -
tx.BlockHeight - int64(exp.ChainParams.TicketMaturity) - 1
} else if tx.Confirmations >= int64(exp.ChainParams.TicketExpiry+
uint32(exp.ChainParams.TicketMaturity)) { // Expired
// Blocks ticket was active before expiring (actual no luck)
tx.TicketInfo.TicketLiveBlocks = int64(exp.ChainParams.TicketExpiry)
} else { // Active
// Blocks ticket has been active and eligible to vote
tx.TicketInfo.TicketLiveBlocks = blocksLive
}
tx.TicketInfo.BestLuck = tx.TicketInfo.TicketExpiry / int64(exp.ChainParams.TicketPoolSize)
tx.TicketInfo.AvgLuck = tx.TicketInfo.BestLuck - 1
if tx.TicketInfo.TicketLiveBlocks == int64(exp.ChainParams.TicketExpiry) {
tx.TicketInfo.VoteLuck = 0
} else {
tx.TicketInfo.VoteLuck = float64(tx.TicketInfo.BestLuck) -
(float64(tx.TicketInfo.TicketLiveBlocks) / float64(exp.ChainParams.TicketPoolSize))
}
// Compare in float64 so the one-block tolerance (1/TicketPoolSize) is not
// truncated to zero by integer division.
if tx.TicketInfo.VoteLuck >= float64(tx.TicketInfo.BestLuck)-
1/float64(exp.ChainParams.TicketPoolSize) {
tx.TicketInfo.LuckStatus = "Perfection"
} else if tx.TicketInfo.VoteLuck > (float64(tx.TicketInfo.BestLuck) - 0.25) {
tx.TicketInfo.LuckStatus = "Very Lucky!"
} else if tx.TicketInfo.VoteLuck > (float64(tx.TicketInfo.BestLuck) - 0.75) {
tx.TicketInfo.LuckStatus = "Good Luck"
} else if tx.TicketInfo.VoteLuck > (float64(tx.TicketInfo.BestLuck) - 1.25) {
tx.TicketInfo.LuckStatus = "Normal"
} else if tx.TicketInfo.VoteLuck > (float64(tx.TicketInfo.BestLuck) * 0.50) {
tx.TicketInfo.LuckStatus = "Bad Luck"
} else if tx.TicketInfo.VoteLuck > 0 {
tx.TicketInfo.LuckStatus = "Horrible Luck!"
} else if tx.TicketInfo.VoteLuck == 0 {
tx.TicketInfo.LuckStatus = "No Luck"
}
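// Rough worked example, assuming mainnet parameters (TicketExpiry=40960,
// TicketPoolSize=8192): BestLuck = 40960/8192 = 5 and AvgLuck = 4. A ticket
// that voted after 1024 live blocks has VoteLuck = 5 - 1024/8192 = 4.875,
// which lands in the "Very Lucky!" band (> BestLuck - 0.25).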
// Chance for a ticket to NOT be voted in a given time frame:
// C = (1 - P)^N
// Where: P is the probability of a vote in one block. (votes
// per block / current ticket pool size)
// N is the number of blocks before ticket expiry. (ticket
// expiry in blocks - (number of blocks since ticket purchase -
// ticket maturity))
// C is the probability (chance)
exp.pageData.RLock()
pVote := float64(exp.ChainParams.TicketsPerBlock) /
float64(exp.pageData.HomeInfo.PoolInfo.Size)
exp.pageData.RUnlock()
remainingBlocksLive := float64(exp.ChainParams.TicketExpiry) -
float64(blocksLive)
tx.TicketInfo.Probability = 100 * math.Pow(1-pVote, remainingBlocksLive)
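// Rough worked example, assuming mainnet parameters: with 5 votes per block
// and a live pool of ~40960 tickets, pVote ~= 0.000122. For a freshly
// matured ticket, N = 40960 blocks remain, so C ~= (1-0.000122)^40960 ~=
// e^-5 ~= 0.0067, i.e. Probability ~= 0.67%.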
}
} // tx.IsTicket()
// Prepare the string to display for previous outpoint.
for idx := range tx.Vin {
vin := &tx.Vin[idx]
if vin.Coinbase != "" {
vin.DisplayText = types.CoinbaseTypeStr
} else if vin.Stakebase != "" {
vin.DisplayText = "Stakebase"
} else {
voutStr := strconv.Itoa(int(vin.Vout))
vin.DisplayText = vin.Txid + ":" + voutStr
vin.TextIsHash = true
vin.Link = "/tx/" + vin.Txid + "/out/" + voutStr
}
}
// For an unconfirmed tx, get the time it was received in explorer's mempool.
if tx.BlockHeight == 0 {
tx.Time = exp.mempoolTime(tx.TxID)
}
pageData := struct {
*CommonPageData
Data *types.TxInfo
Blocks []*dbtypes.BlockStatus
BlockInds []uint32
IsConfirmedMainchain bool
HighlightInOut string
HighlightInOutID int64
Conversions struct {
Total *exchanges.Conversion
Fees *exchanges.Conversion
}
}{
CommonPageData: exp.commonData(r),
Data: tx,
Blocks: blocks,
BlockInds: blockInds,
IsConfirmedMainchain: isConfirmedMainchain,
HighlightInOut: inout,
HighlightInOutID: inoutid,
}
// Get a fiat-converted value for the total and the fees.
if exp.xcBot != nil {
pageData.Conversions.Total = exp.xcBot.Conversion(tx.Total)
pageData.Conversions.Fees = exp.xcBot.Conversion(tx.Fee.ToCoin())
}
str, err := exp.templates.exec("tx", pageData)
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Turbolinks-Location", r.URL.RequestURI())
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// AddressPage is the page handler for the "/address" path.
func (exp *explorerUI) AddressPage(w http.ResponseWriter, r *http.Request) {
// AddressPageData is the data structure passed to the HTML template
type AddressPageData struct {
*CommonPageData
Data *dbtypes.AddressInfo
CRLFDownload bool
FiatBalance *exchanges.Conversion
Pages []pageNumber
}
// Grab the URL query parameters
address, txnType, limitN, offsetAddrOuts, err := parseAddressParams(r)
if err != nil {
exp.StatusPage(w, defaultErrorCode, err.Error(), address, ExpStatusError)
return
}
// Validate the address.
addr, addrType, addrErr := txhelpers.AddressValidation(address, exp.ChainParams)
isZeroAddress := addrErr == txhelpers.AddressErrorZeroAddress
if addrErr != nil && !isZeroAddress {
var status expStatus
var message string
code := defaultErrorCode
switch addrErr {
case txhelpers.AddressErrorDecodeFailed, txhelpers.AddressErrorUnknown:
status = ExpStatusError
message = "Unexpected issue validating this address."
case txhelpers.AddressErrorWrongNet:
status = ExpStatusWrongNetwork
message = fmt.Sprintf("The address %v is valid on %s, not %s.",
addr, exp.ChainParams.Net.String(), exp.NetName)
code = wrongNetwork
default:
status = ExpStatusError
message = "Unknown error."
}
exp.StatusPage(w, code, message, address, status)
return
}
// Handle valid but unsupported address types.
switch addrType {
case txhelpers.AddressTypeP2PKH, txhelpers.AddressTypeP2SH:
// All good.
case txhelpers.AddressTypeP2PK:
message := "Looks like you are searching for an address of type P2PK."
exp.StatusPage(w, defaultErrorCode, message, address, ExpStatusP2PKAddress)
return
default:
message := "Unsupported address type."
exp.StatusPage(w, defaultErrorCode, message, address, ExpStatusNotSupported)
return
}
// Retrieve address information from the DB and/or RPC.
var addrData *dbtypes.AddressInfo
if isZeroAddress {
// For the zero address (e.g. DsQxuVRvS4eaJ42dhQEsCXauMWjvopWgrVg),
// short-circuit any queries.
addrData = &dbtypes.AddressInfo{
Address: address,
Net: exp.ChainParams.Net.String(),
IsDummyAddress: true,
Balance: new(dbtypes.AddressBalance),
UnconfirmedTxns: new(dbtypes.AddressTransactions),
}
} else {
addrData, err = exp.AddressListData(address, txnType, limitN, offsetAddrOuts)
if exp.timeoutErrorPage(w, err, "AddressListData") {
return
} else if err != nil {
exp.StatusPage(w, defaultErrorCode, err.Error(), address, ExpStatusError)
return
}
}
// Set page parameters.
addrData.IsDummyAddress = isZeroAddress // may be redundant
addrData.Path = r.URL.Path
// If exchange monitoring is active, prepare a fiat balance conversion
conversion := exp.xcBot.Conversion(dcrutil.Amount(addrData.Balance.TotalUnspent).ToCoin())
// For Windows clients only, link to downloads with CRLF (\r\n) line
// endings.
UseCRLF := strings.Contains(r.UserAgent(), "Windows")
if limitN == 0 {
limitN = 20
}
linkTemplate := fmt.Sprintf("/address/%s?start=%%d&n=%d&txntype=%v", addrData.Address, limitN, txnType)
// Execute the HTML template.
pageData := AddressPageData{
CommonPageData: exp.commonData(r),
Data: addrData,
CRLFDownload: UseCRLF,
FiatBalance: conversion,
Pages: calcPages(int(addrData.TxnCount), int(limitN), int(offsetAddrOuts), linkTemplate),
}
str, err := exp.templates.exec("address", pageData)
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
log.Debugf(`"address" template HTML size: %.2f kiB (%s, %v, %d)`,
float64(len(str))/1024.0, address, txnType, addrData.NumTransactions)
w.Header().Set("Content-Type", "text/html")
w.Header().Set("Turbolinks-Location", r.URL.RequestURI())
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// AddressTable is the page handler for the "/addresstable" path.
func (exp *explorerUI) AddressTable(w http.ResponseWriter, r *http.Request) {
// Grab the URL query parameters
address, txnType, limitN, offsetAddrOuts, err := parseAddressParams(r)
if err != nil {
log.Errorf("AddressTable request error: %v", err)
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
addrData, err := exp.AddressListData(address, txnType, limitN, offsetAddrOuts)
if err != nil {
log.Errorf("AddressListData error: %v", err)
http.Error(w, http.StatusText(http.StatusInternalServerError),
http.StatusInternalServerError)
return
}
linkTemplate := "/address/" + addrData.Address + "?start=%d&n=" + strconv.FormatInt(limitN, 10) + "&txntype=" + fmt.Sprintf("%v", txnType)
response := struct {
TxnCount int64 `json:"tx_count"`
HTML string `json:"html"`
Pages []pageNumber `json:"pages"`
}{
TxnCount: addrData.TxnCount + addrData.NumUnconfirmed,
Pages: calcPages(int(addrData.TxnCount), int(limitN), int(offsetAddrOuts), linkTemplate),
}
response.HTML, err = exp.templates.exec("addresstable", struct {
Data *dbtypes.AddressInfo
}{
Data: addrData,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
http.Error(w, http.StatusText(http.StatusInternalServerError),
http.StatusInternalServerError)
return
}
log.Debugf(`"addresstable" template HTML size: %.2f kiB (%s, %v, %d)`,
float64(len(response.HTML))/1024.0, address, txnType, addrData.NumTransactions)
w.Header().Set("Content-Type", "application/json")
enc := json.NewEncoder(w)
//enc.SetEscapeHTML(false)
err = enc.Encode(response)
if err != nil {
log.Debug(err)
}
}
// parseAddressParams is used by both /address and /addresstable.
func parseAddressParams(r *http.Request) (address string, txnType dbtypes.AddrTxnViewType, limitN, offsetAddrOuts int64, err error) {
// Get the address URL parameter, which should be set in the request context
// by the addressPathCtx middleware.
address, ok := r.Context().Value(ctxAddress).(string)
if !ok {
log.Trace("address not set")
err = fmt.Errorf("there seems to not be an address in this request")
return
}
// Number of outputs for the address to query the database for. The URL
// query parameter "n" is used to specify the limit (e.g. "?n=20").
limitN = defaultAddressRows
if nParam := r.URL.Query().Get("n"); nParam != "" {
var val uint64
val, err = strconv.ParseUint(nParam, 10, 64)
if err != nil {
err = fmt.Errorf("invalid n value")
return
}
if int64(val) > MaxAddressRows {
log.Warnf("addressPage: requested up to %d address rows, "+
"limiting to %d", limitN, MaxAddressRows)
limitN = MaxAddressRows
} else {
limitN = int64(val)
}
}
// Number of outputs to skip (OFFSET in database query). For UX reasons, the
// "start" URL query parameter is used.
if startParam := r.URL.Query().Get("start"); startParam != "" {
var val uint64
val, err = strconv.ParseUint(startParam, 10, 64)
if err != nil {
err = fmt.Errorf("invalid start value")
return
}
offsetAddrOuts = int64(val)
}
// Transaction types to show.
txntype := r.URL.Query().Get("txntype")
if txntype == "" {
txntype = "all"
}
txnType = dbtypes.AddrTxnViewTypeFromStr(txntype)
if txnType == dbtypes.AddrTxnUnknown {
err = fmt.Errorf("unknown txntype query value")
}
// log.Debugf("Showing transaction types: %s (%d)", txntype, txnType)
return
}
// AddressListData grabs a size-limited and type-filtered set of inputs/outputs
// for a given address.
func (exp *explorerUI) AddressListData(address string, txnType dbtypes.AddrTxnViewType, limitN, offsetAddrOuts int64) (addrData *dbtypes.AddressInfo, err error) {
// Get addresses table rows for the address.
addrData, err = exp.dataSource.AddressData(address, limitN,
offsetAddrOuts, txnType)
if dbtypes.IsTimeoutErr(err) { // The caller renders the DB timeout page.
return nil, err
} else if err != nil {
log.Errorf("AddressData error encountered: %v", err)
err = fmt.Errorf(defaultErrorMessage)
return nil, err
}
return
}
// DecodeTxPage handles the "decode/broadcast transaction" page. The actual
// decoding or broadcasting is handled by the websocket hub.
func (exp *explorerUI) DecodeTxPage(w http.ResponseWriter, r *http.Request) {
str, err := exp.templates.exec("rawtx", struct {
*CommonPageData
}{
CommonPageData: exp.commonData(r),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// Charts handles the charts displays showing the various charts plotted.
func (exp *explorerUI) Charts(w http.ResponseWriter, r *http.Request) {
exp.pageData.RLock()
tpSize := exp.pageData.HomeInfo.PoolInfo.Target
exp.pageData.RUnlock()
str, err := exp.templates.exec("charts", struct {
*CommonPageData
Premine int64
TargetPoolSize uint32
}{
CommonPageData: exp.commonData(r),
Premine: exp.premine,
TargetPoolSize: tpSize,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// Search implements a primitive search algorithm by checking if the value in
// question is a block index, block hash, address hash or transaction hash and
// redirects to the appropriate page or displays an error.
func (exp *explorerUI) Search(w http.ResponseWriter, r *http.Request) {
// The ?search= query.
searchStr := r.URL.Query().Get("search")
// Strip leading and tailing whitespace.
searchStr = strings.TrimSpace(searchStr)
if searchStr == "" {
exp.StatusPage(w, "search failed", "The search term was empty.",
searchStr, ExpStatusBadRequest)
return
}
// Attempt to get a block hash by calling GetBlockHash of WiredDB or
// BlockHash of ChainDB to see if the URL query value is a block index. Then
// redirect to the block page if it is.
idx, err := strconv.ParseInt(searchStr, 10, 0)
if err == nil {
_, err = exp.dataSource.GetBlockHash(idx)
if err == nil {
http.Redirect(w, r, "/block/"+searchStr, http.StatusPermanentRedirect)
return
}
_, err = exp.dataSource.BlockHash(idx)
if err == nil {
http.Redirect(w, r, "/block/"+searchStr, http.StatusPermanentRedirect)
return
}
exp.StatusPage(w, "search failed", "Block "+searchStr+
" has not yet been mined", searchStr, ExpStatusNotFound)
return
}
// Execute the proposal search (by both RefID and proposal token) before the
// address search, because most alphanumeric search strings are interpreted
// as addresses.
if exp.proposalsSource != nil {
// Check if the search term is an existing proposal token.
proposalInfo, err := exp.proposalsSource.ProposalByToken(searchStr)
if err != nil || proposalInfo.RefID == "" {
// Otherwise, check if the search term is an existing proposal RefID.
proposalInfo, err = exp.proposalsSource.ProposalByRefID(searchStr)
}
if err == nil && proposalInfo.RefID != "" {
http.Redirect(w, r, "/proposal/"+proposalInfo.RefID, http.StatusPermanentRedirect)
return
}
}
// Call GetExplorerAddress to see if the value is an address hash and
// then redirect to the address page if it is.
address, _, addrErr := exp.dataSource.GetExplorerAddress(searchStr, 1, 0)
switch addrErr {
case txhelpers.AddressErrorNoError, txhelpers.AddressErrorZeroAddress:
http.Redirect(w, r, "/address/"+searchStr, http.StatusPermanentRedirect)
return
case txhelpers.AddressErrorWrongNet:
// Status page will provide a link, but the address page can too.
message := fmt.Sprintf("The address %v is valid on %s, not %s",
searchStr, address.Net, exp.NetName)
exp.StatusPage(w, wrongNetwork, message, searchStr, ExpStatusWrongNetwork)
return
}
// This may be unnecessarily duplicative, and possibly very slow for very
// active addresses.
addrHist, _, _ := exp.dataSource.AddressHistory(searchStr,
1, 0, dbtypes.AddrTxnAll)
if len(addrHist) > 0 {
http.Redirect(w, r, "/address/"+searchStr, http.StatusPermanentRedirect)
return
}
// Split searchStr to the first part corresponding to a transaction hash and
// to the second part corresponding to a transaction output index.
searchStrSplit := strings.Split(searchStr, ":")
searchStrRewritten := searchStrSplit[0]
switch {
case len(searchStrSplit) > 2:
exp.StatusPage(w, "search failed", "Transaction outpoint does not have a valid format: "+searchStr,
"", ExpStatusNotFound)
return
case len(searchStrSplit) > 1:
if _, err := strconv.ParseUint(searchStrSplit[1], 10, 32); err == nil {
searchStrRewritten = searchStrRewritten + "/out/" + searchStrSplit[1]
} else {
exp.StatusPage(w, "search failed", "Transaction output index is not a valid non-negative integer: "+searchStrSplit[1],
"", ExpStatusNotFound)
return
}
}
// Remaining possibilities are hashes, so verify the string is a hash.
if _, err = chainhash.NewHashFromStr(searchStrSplit[0]); err != nil {
exp.StatusPage(w, "search failed",
"Search string is not a valid hash or address: "+searchStr,
"", ExpStatusNotFound)
return
}
// Attempt to get a block index by calling GetBlockHeight to see if the
// value is a block hash and then redirect to the block page if it is.
_, err = exp.dataSource.GetBlockHeight(searchStrSplit[0])
if err == nil {
http.Redirect(w, r, "/block/"+searchStrSplit[0], http.StatusPermanentRedirect)
return
}
// Call GetExplorerTx to see if the value is a transaction hash and then
// redirect to the tx page if it is.
tx := exp.dataSource.GetExplorerTx(searchStrSplit[0])
if tx != nil {
http.Redirect(w, r, "/tx/"+searchStrRewritten, http.StatusPermanentRedirect)
return
}
// Also check the aux DB as it may have transactions from orphaned blocks.
dbTxs, err := exp.dataSource.Transaction(searchStrSplit[0])
if err != nil && err != sql.ErrNoRows {
log.Errorf("Searching for transaction failed: %v", err)
}
if dbTxs != nil {
http.Redirect(w, r, "/tx/"+searchStrRewritten, http.StatusPermanentRedirect)
return
}
message := "The search did not find any matching address, block, transaction or proposal token: " + searchStr
exp.StatusPage(w, "search failed", message, "", ExpStatusNotFound)
}
// StatusPage provides a page for displaying status messages and exception
// handling without redirecting. Be sure to return after calling StatusPage if
// this completes the processing of the calling http handler.
func (exp *explorerUI) StatusPage(w http.ResponseWriter, code, message, additionalInfo string, sType expStatus) {
commonPageData := exp.commonData(dummyRequest)
if commonPageData == nil {
// exp.blockData.GetTip likely failed due to empty DB.
http.Error(w, "The database is initializing. Try again later.",
http.StatusServiceUnavailable)
return
}
str, err := exp.templates.exec("status", struct {
*CommonPageData
StatusType expStatus
Code string
Message string
AdditionalInfo string
}{
CommonPageData: commonPageData,
StatusType: sType,
Code: code,
Message: message,
AdditionalInfo: additionalInfo,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
str = "Something went very wrong if you can see this, try refreshing"
}
w.Header().Set("Content-Type", "text/html")
switch sType {
case ExpStatusDBTimeout:
w.WriteHeader(http.StatusServiceUnavailable)
case ExpStatusNotFound:
w.WriteHeader(http.StatusNotFound)
case ExpStatusFutureBlock:
w.WriteHeader(http.StatusOK)
case ExpStatusError:
w.WriteHeader(http.StatusInternalServerError)
// When blockchain sync is running, status 202 is used to indicate that
// requests other than those for the sync status page have been received
// and accepted, but cannot be processed until the sync is complete.
case ExpStatusSyncing:
w.WriteHeader(http.StatusAccepted)
case ExpStatusNotSupported:
w.WriteHeader(http.StatusUnprocessableEntity)
case ExpStatusBadRequest:
w.WriteHeader(http.StatusBadRequest)
default:
w.WriteHeader(http.StatusServiceUnavailable)
}
io.WriteString(w, str)
}
// NotFound wraps StatusPage to display a 404 page.
func (exp *explorerUI) NotFound(w http.ResponseWriter, r *http.Request) {
exp.StatusPage(w, "Page not found.", "Cannot find page: "+r.URL.Path, "", ExpStatusNotFound)
}
// ParametersPage is the page handler for the "/parameters" path.
func (exp *explorerUI) ParametersPage(w http.ResponseWriter, r *http.Request) {
params := exp.ChainParams
addrPrefix := types.AddressPrefixes(params)
actualTicketPoolSize := int64(params.TicketPoolSize * params.TicketsPerBlock)
exp.pageData.RLock()
var maxBlockSize int64
if exp.pageData.BlockchainInfo != nil {
maxBlockSize = exp.pageData.BlockchainInfo.MaxBlockSize
} else {
maxBlockSize = int64(params.MaximumBlockSizes[0])
}
exp.pageData.RUnlock()
type ExtendedParams struct {
MaximumBlockSize int64
ActualTicketPoolSize int64
AddressPrefix []types.AddrPrefix
}
str, err := exp.templates.exec("parameters", struct {
*CommonPageData
ExtendedParams
}{
CommonPageData: exp.commonData(r),
ExtendedParams: ExtendedParams{
MaximumBlockSize: maxBlockSize,
AddressPrefix: addrPrefix,
ActualTicketPoolSize: actualTicketPoolSize,
},
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// AgendaPage is the page handler for the "/agenda" path.
func (exp *explorerUI) AgendaPage(w http.ResponseWriter, r *http.Request) {
errPageInvalidAgenda := func(err error) {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, "the agenda ID given seems to not exist",
"", ExpStatusNotFound)
}
// Attempt to get agendaid string from URL path.
agendaId := getAgendaIDCtx(r)
agendaInfo, err := exp.agendasSource.AgendaInfo(agendaId)
if err != nil {
errPageInvalidAgenda(err)
return
}
summary, err := exp.dataSource.AgendasVotesSummary(agendaId)
if err != nil {
log.Errorf("fetching Cumulative votes choices count failed: %v", err)
}
// Override the default count values with the actual vote choice counts,
// matching the data displayed on the "Cumulative Vote Choices" and
// "Vote Choices By Block" charts.
var totalVotes uint32
for index := range agendaInfo.Choices {
switch strings.ToLower(agendaInfo.Choices[index].ID) {
case "abstain":
agendaInfo.Choices[index].Count = summary.Abstain
case "yes":
agendaInfo.Choices[index].Count = summary.Yes
case "no":
agendaInfo.Choices[index].Count = summary.No
}
totalVotes += agendaInfo.Choices[index].Count
}
ruleChangeI := exp.ChainParams.RuleChangeActivationInterval
qVotes := uint32(float64(ruleChangeI) * agendaInfo.QuorumProgress)
var timeLeft string
blocksLeft := summary.LockedIn - exp.Height()
if blocksLeft > 0 {
// Approximately 1 block per 5 minutes.
var minPerBlock = 5 * time.Minute
hoursLeft := int((time.Duration(blocksLeft) * minPerBlock).Hours())
if hoursLeft > 0 {
timeLeft = fmt.Sprintf("%v days %v hours", hoursLeft/24, hoursLeft%24)
}
} else {
blocksLeft = 0
}
str, err := exp.templates.exec("agenda", struct {
*CommonPageData
Ai *agendas.AgendaTagged
QuorumVotes uint32
RuleChangeI uint32
VotingStarted int64
LockedIn int64
BlocksLeft int64
TimeRemaining string
TotalVotes uint32
}{
CommonPageData: exp.commonData(r),
Ai: agendaInfo,
QuorumVotes: qVotes,
RuleChangeI: ruleChangeI,
VotingStarted: summary.VotingStarted,
LockedIn: summary.LockedIn,
BlocksLeft: blocksLeft,
TimeRemaining: timeLeft,
TotalVotes: totalVotes,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// AgendasPage is the page handler for the "/agendas" path.
func (exp *explorerUI) AgendasPage(w http.ResponseWriter, r *http.Request) {
if exp.voteTracker == nil {
log.Warnf("Agendas requested with nil voteTracker")
exp.StatusPage(w, "", "agendas disabled on simnet", "", ExpStatusPageDisabled)
return
}
agenda, err := exp.agendasSource.AllAgendas()
if err != nil {
log.Errorf("Error fetching agendas: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
str, err := exp.templates.exec("agendas", struct {
*CommonPageData
Agendas []*agendas.AgendaTagged
VotingSummary *agendas.VoteSummary
}{
CommonPageData: exp.commonData(r),
Agendas: agenda,
VotingSummary: exp.voteTracker.Summary(),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// ProposalPage is the page handler for the "/proposal" path.
func (exp *explorerUI) ProposalPage(w http.ResponseWriter, r *http.Request) {
if exp.proposalsSource == nil {
errMsg := "Remove the disable-piparser flag to activate it."
log.Errorf("proposal page is disabled. %s", errMsg)
exp.StatusPage(w, fmt.Sprintf(pageDisabledCode, "/proposal"), errMsg, "", ExpStatusPageDisabled)
return
}
// Attempts to retrieve a proposal refID from the URL path.
param := getProposalTokenCtx(r)
proposalInfo, err := exp.proposalsSource.ProposalByRefID(param)
if err != nil {
// Check if the URL parameter passed is a proposal token and attempt to
// fetch its data.
proposalInfo, newErr := exp.proposalsSource.ProposalByToken(param)
if newErr == nil && proposalInfo != nil && proposalInfo.RefID != "" {
// Redirect to a human-readable URL (replace the token with the RefID).
http.Redirect(w, r, "/proposal/"+proposalInfo.RefID, http.StatusPermanentRedirect)
return
}
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, "the proposal token or RefID does not exist",
"", ExpStatusNotFound)
return
}
commonData := exp.commonData(r)
str, err := exp.templates.exec("proposal", struct {
*CommonPageData
Data *pitypes.ProposalInfo
PoliteiaURL string
Metadata *pitypes.ProposalMetadata
}{
CommonPageData: commonData,
Data: proposalInfo,
PoliteiaURL: exp.politeiaAPIURL,
Metadata: proposalInfo.Metadata(int64(commonData.Tip.Height), int64(exp.ChainParams.TargetTimePerBlock/time.Second)),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// ProposalsPage is the page handler for the "/proposals" path.
func (exp *explorerUI) ProposalsPage(w http.ResponseWriter, r *http.Request) {
if exp.proposalsSource == nil {
errMsg := "Remove the disable-piparser flag to activate it."
log.Errorf("proposals page is disabled. %s", errMsg)
exp.StatusPage(w, fmt.Sprintf(pageDisabledCode, "/proposals"), errMsg, "", ExpStatusPageDisabled)
return
}
rowsCount := uint64(20)
if rowsStr := r.URL.Query().Get("rows"); rowsStr != "" {
val, err := strconv.ParseUint(rowsStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
if val > 0 {
rowsCount = val
}
}
var offset uint64
if offsetStr := r.URL.Query().Get("offset"); offsetStr != "" {
val, err := strconv.ParseUint(offsetStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
offset = val
}
var filterBy uint64
if filterByStr := r.URL.Query().Get("byvotestatus"); filterByStr != "" {
val, err := strconv.ParseUint(filterByStr, 10, 64)
if err != nil {
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
return
}
filterBy = val
}
var err error
var count int
var proposals []*pitypes.ProposalInfo
// Check if filter by votes status query parameter was passed.
if filterBy > 0 {
proposals, count, err = exp.proposalsSource.AllProposals(int(offset),
int(rowsCount), int(filterBy))
} else {
proposals, count, err = exp.proposalsSource.AllProposals(int(offset),
int(rowsCount))
}
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
str, err := exp.templates.exec("proposals", struct {
*CommonPageData
Proposals []*pitypes.ProposalInfo
VotesStatus map[pitypes.VoteStatusType]string
VStatusFilter int
Offset int64
Limit int64
TotalCount int64
PoliteiaURL string
LastVotesSync int64
LastPropSync int64
TimePerBlock int64
}{
CommonPageData: exp.commonData(r),
Proposals: proposals,
VotesStatus: pitypes.VotesStatuses(),
Offset: int64(offset),
Limit: int64(rowsCount),
VStatusFilter: int(filterBy),
TotalCount: int64(count),
PoliteiaURL: exp.politeiaAPIURL,
LastVotesSync: exp.dataSource.LastPiParserSync().UTC().Unix(),
LastPropSync: exp.proposalsSource.LastProposalsSync(),
TimePerBlock: int64(exp.ChainParams.TargetTimePerBlock.Seconds()),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// HandleApiRequestsOnSync handles all API requests while the sync status
// page is running.
func (exp *explorerUI) HandleApiRequestsOnSync(w http.ResponseWriter, r *http.Request) {
var complete int
dataFetched := SyncStatus()
for _, v := range dataFetched {
if v.PercentComplete == 100 {
complete++
}
}
// Only report "complete" once every stage has reached 100%.
syncStatus := "in progress"
if complete == len(dataFetched) {
syncStatus = "complete"
}
stageRunning := complete + 1
if stageRunning > len(dataFetched) {
stageRunning = len(dataFetched)
}
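// The JSON response has the shape:
//   {"message":"blockchain sync is in progress.","stage":2,"stages":[...]}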
data, err := json.Marshal(struct {
Message string `json:"message"`
Stage int `json:"stage"`
Stages []SyncStatusInfo `json:"stages"`
}{
fmt.Sprintf("blockchain sync is %s.", syncStatus),
stageRunning,
dataFetched,
})
str := string(data)
if err != nil {
str = fmt.Sprintf("error occurred while processing the API response: %v", err)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusServiceUnavailable)
io.WriteString(w, str)
}
// StatsPage is the page handler for the "/stats" path.
func (exp *explorerUI) StatsPage(w http.ResponseWriter, r *http.Request) {
// Get current PoW difficulty.
powDiff, err := exp.dataSource.CurrentDifficulty()
if err != nil {
log.Errorf("Failed to get Difficulty: %v", err)
}
// Subsidies
ultSubsidy := txhelpers.UltimateSubsidy(exp.ChainParams)
bestBlockHeight, err := exp.dataSource.GetHeight()
if err != nil {
log.Errorf("GetHeight failed: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "",
ExpStatusError)
return
}
blockSubsidy := exp.dataSource.BlockSubsidy(bestBlockHeight,
exp.ChainParams.TicketsPerBlock)
// Safely retrieve the inventory pointer, which can be reset in StoreMPData.
inv := exp.MempoolInventory()
// Prevent modifications to the shared inventory struct (e.g. in the
// MempoolMonitor) while we retrieve the number of votes and tickets.
inv.RLock()
numVotes := inv.NumVotes
numTickets := inv.NumTickets
inv.RUnlock()
exp.pageData.RLock()
stats := types.StatsInfo{
TotalSupply: exp.pageData.HomeInfo.CoinSupply,
UltimateSupply: ultSubsidy,
TotalSupplyPercentage: float64(exp.pageData.HomeInfo.CoinSupply) /
float64(ultSubsidy) * 100,
ProjectFunds: exp.pageData.HomeInfo.DevFund,
ProjectAddress: exp.pageData.HomeInfo.DevAddress,
PoWDiff: exp.pageData.HomeInfo.Difficulty,
BlockReward: blockSubsidy.Total,
NextBlockReward: exp.pageData.HomeInfo.NBlockSubsidy.Total,
PoWReward: exp.pageData.HomeInfo.NBlockSubsidy.PoW,
PoSReward: exp.pageData.HomeInfo.NBlockSubsidy.PoS,
ProjectFundReward: exp.pageData.HomeInfo.NBlockSubsidy.Dev,
VotesInMempool: numVotes,
TicketsInMempool: numTickets,
TicketPrice: exp.pageData.HomeInfo.StakeDiff,
NextEstimatedTicketPrice: exp.pageData.HomeInfo.NextExpectedStakeDiff,
TicketPoolSize: exp.pageData.HomeInfo.PoolInfo.Size,
TicketPoolSizePerToTarget: float64(exp.pageData.HomeInfo.PoolInfo.Size) /
float64(exp.ChainParams.TicketPoolSize*exp.ChainParams.TicketsPerBlock) * 100,
TicketPoolValue: exp.pageData.HomeInfo.PoolInfo.Value,
TPVOfTotalSupplyPeecentage: exp.pageData.HomeInfo.PoolInfo.Percentage,
TicketsROI: exp.pageData.HomeInfo.TicketReward,
RewardPeriod: exp.pageData.HomeInfo.RewardPeriod,
ASR: exp.pageData.HomeInfo.ASR,
APR: exp.pageData.HomeInfo.ASR,
IdxBlockInWindow: exp.pageData.HomeInfo.IdxBlockInWindow,
WindowSize: exp.pageData.HomeInfo.Params.WindowSize,
BlockTime: exp.pageData.HomeInfo.Params.BlockTime,
IdxInRewardWindow: exp.pageData.HomeInfo.IdxInRewardWindow,
RewardWindowSize: exp.pageData.HomeInfo.Params.RewardWindowSize,
HashRate: powDiff * math.Pow(2, 32) /
exp.ChainParams.TargetTimePerBlock.Seconds() / math.Pow(10, 15),
}
exp.pageData.RUnlock()
str, err := exp.templates.exec("statistics", struct {
*CommonPageData
Stats types.StatsInfo
}{
CommonPageData: exp.commonData(r),
Stats: stats,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// MarketPage is the page handler for the "/market" path.
func (exp *explorerUI) MarketPage(w http.ResponseWriter, r *http.Request) {
str, err := exp.templates.exec("market", struct {
*CommonPageData
DepthMarkets []string
StickMarkets map[string]string
XcState *exchanges.ExchangeBotState
}{
CommonPageData: exp.commonData(r),
XcState: exp.getExchangeState(),
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
// commonData grabs the common page data that is available to every page.
// This is particularly useful for extras.tmpl, parts of which are used on
// every page.
func (exp *explorerUI) commonData(r *http.Request) *CommonPageData {
tip, err := exp.dataSource.GetTip()
if err != nil {
log.Errorf("Failed to get the chain tip from the database.: %v", err)
return nil
}
darkMode, err := r.Cookie(darkModeCoookie)
if err != nil && err != http.ErrNoCookie {
log.Errorf("Cookie dcrdataDarkBG retrieval error: %v", err)
}
return &CommonPageData{
Tip: tip,
Version: exp.Version,
ChainParams: exp.ChainParams,
BlockTimeUnix: int64(exp.ChainParams.TargetTimePerBlock.Seconds()),
DevAddress: exp.pageData.HomeInfo.DevAddress,
NetName: exp.NetName,
Links: explorerLinks,
Cookies: Cookies{
DarkMode: darkMode != nil && darkMode.Value == "1",
},
RequestURI: r.URL.RequestURI(),
}
}
// A page number has the information necessary to create numbered pagination
// links.
type pageNumber struct {
Active bool `json:"active"`
Link string `json:"link"`
Str string `json:"str"`
}
func makePageNumber(active bool, link, str string) pageNumber {
return pageNumber{
Active: active,
Link: link,
Str: str,
}
}
type pageNumbers []pageNumber
const ellipsisHTML = "…"
// Get a set of pagination numbers, based on a set number of rows that are
// assumed to start from page 1 at the highest row and descend from there.
// For example, if there are 20 pages of 10 rows, 0 - 199, page 1 would start at
// row 199 and go down to row 190. If the offset is between 190 and 199, the
// pagination would return the pageNumbers necessary to create a pagination
// that looks like 1 2 3 4 5 6 7 8 ... 20. Each pageNumber includes a link with
// the offset inserted using Sprintf.
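// For instance, calcPagesDesc(199, 10, 195, "?offset=%d") marks page 1 as
// active and returns entries for pages 1-8, an ellipsis, and page 20, with
// offsets descending from 199.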
func calcPagesDesc(rows, pageSize, offset int, link string) pageNumbers {
nums := make(pageNumbers, 0, 11)
endIdx := rows / pageSize
if endIdx == 0 {
return nums
}
pages := endIdx + 1
currentPageIdx := (rows - offset) / pageSize
if pages > 10 {
nums = append(nums, makePageNumber(currentPageIdx == 0, fmt.Sprintf(link, rows), "1"))
start := currentPageIdx - 3
endMiddle := start + 6
if start <= 1 {
start = 1
endMiddle = 7
} else if endMiddle >= endIdx-1 {
endMiddle = endIdx - 1
start = endMiddle - 6
}
if start > 1 {
nums = append(nums, makePageNumber(false, "", ellipsisHTML))
}
for i := start; i <= endMiddle; i++ {
nums = append(nums, makePageNumber(i == currentPageIdx, fmt.Sprintf(link, rows-i*pageSize), strconv.Itoa(i+1)))
}
if endMiddle < endIdx-1 {
nums = append(nums, makePageNumber(false, "", ellipsisHTML))
}
if pages > 1 {
nums = append(nums, makePageNumber(currentPageIdx == endIdx, fmt.Sprintf(link, rows-endIdx*pageSize), strconv.Itoa(pages)))
}
} else {
for i := 0; i < pages; i++ {
nums = append(nums, makePageNumber(i == currentPageIdx, fmt.Sprintf(link, rows-i*pageSize), strconv.Itoa(i+1)))
}
}
return nums
}
// Get a set of pagination numbers, based on a set number of rows that are
// assumed to start from page 1 at the lowest row and ascend from there.
// For example, if there are 20 pages of 10 rows, 0 - 199, page 1 would start at
// row 0 and go up to row 9. If the offset is between 0 and 9, the
// pagination would return the pageNumbers necessary to create a pagination
// that looks like 1 2 3 4 5 6 7 8 ... 20. Each pageNumber includes a link with
// the offset inserted using Sprintf.
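// For instance, calcPages(199, 10, 0, "?offset=%d") marks page 1 as active
// and returns entries for pages 1-8, an ellipsis, and page 20, with offsets
// ascending from 0.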
func calcPages(rows, pageSize, offset int, link string) pageNumbers {
nums := make(pageNumbers, 0, 11)
endIdx := rows / pageSize
if endIdx == 0 {
return nums
}
pages := endIdx + 1
currentPageIdx := offset / pageSize
if pages > 10 {
nums = append(nums, makePageNumber(currentPageIdx == 0, fmt.Sprintf(link, 0), "1"))
start := currentPageIdx - 3
endMiddle := start + 6
if start <= 1 {
start = 1
endMiddle = 7
} else if endMiddle >= endIdx-1 {
endMiddle = endIdx - 1
start = endMiddle - 6
}
if start > 1 {
nums = append(nums, makePageNumber(false, "", ellipsisHTML))
}
for i := start; i <= endMiddle; i++ {
nums = append(nums, makePageNumber(i == currentPageIdx, fmt.Sprintf(link, i*pageSize), strconv.Itoa(i+1)))
}
if endMiddle < endIdx-1 {
nums = append(nums, makePageNumber(false, "", ellipsisHTML))
}
if pages > 1 {
nums = append(nums, makePageNumber(currentPageIdx == endIdx, fmt.Sprintf(link, endIdx*pageSize), strconv.Itoa(pages)))
}
} else {
for i := 0; i < pages; i++ {
nums = append(nums, makePageNumber(i == currentPageIdx, fmt.Sprintf(link, i*pageSize), strconv.Itoa(i+1)))
}
}
return nums
}
// AttackCost is the page handler for the "/attack-cost" path.
func (exp *explorerUI) AttackCost(w http.ResponseWriter, r *http.Request) {
price := 24.42
if exp.xcBot != nil {
if rate := exp.xcBot.Conversion(1.0); rate != nil {
price = rate.Value
}
}
exp.pageData.RLock()
height := exp.pageData.BlockInfo.Height
ticketPoolValue := exp.pageData.HomeInfo.PoolInfo.Value
ticketPoolSize := exp.pageData.HomeInfo.PoolInfo.Size
ticketPrice := exp.pageData.HomeInfo.StakeDiff
hashRate := exp.pageData.HomeInfo.HashRate
exp.pageData.RUnlock()
str, err := exp.templates.exec("attackcost", struct {
*CommonPageData
HashRate float64
Height int64
DCRPrice float64
TicketPrice float64
TicketPoolSize int64
TicketPoolValue float64
}{
CommonPageData: exp.commonData(r),
HashRate: hashRate,
Height: height,
DCRPrice: price,
TicketPrice: ticketPrice,
TicketPoolSize: int64(ticketPoolSize),
TicketPoolValue: ticketPoolValue,
})
if err != nil {
log.Errorf("Template execute failure: %v", err)
exp.StatusPage(w, defaultErrorCode, defaultErrorMessage, "", ExpStatusError)
return
}
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
io.WriteString(w, str)
}
| {
if chainParams == nil {
return "invalid"
}
if strings.HasPrefix(strings.ToLower(chainParams.Name), "testnet") {
return testnetNetName
}
return strings.Title(chainParams.Name)
} |
detailedlist.py | from rubicon.java.android_events import Handler, PythonRunnable
from rubicon.java.jni import java
from travertino.size import at_least
from ..libs.android import R__color
from ..libs.android.graphics import BitmapFactory, Rect
from ..libs.android.view import Gravity, OnClickListener, View__MeasureSpec
from ..libs.android.widget import (
ImageView,
ImageView__ScaleType,
LinearLayout,
LinearLayout__LayoutParams,
RelativeLayout,
RelativeLayout__LayoutParams,
ScrollView,
TextView
)
from ..libs.androidx.swiperefreshlayout import (
SwipeRefreshLayout,
SwipeRefreshLayout__OnRefreshListener
)
from .base import Widget
class DetailedListOnClickListener(OnClickListener):
def __init__(self, impl, row_number):
super().__init__()
self._impl = impl
self._row_number = row_number
def onClick(self, _view):
row = self._impl.interface.data[self._row_number]
self._impl._selection = row
if self._impl.interface.on_select:
self._impl.interface.on_select(self._impl.interface, row=row)
class OnRefreshListener(SwipeRefreshLayout__OnRefreshListener):
def __init__(self, interface):
super().__init__()
self._interface = interface
def onRefresh(self):
if self._interface.on_refresh:
self._interface.on_refresh(self._interface)
class DetailedList(Widget):
ROW_HEIGHT = 250
_swipe_refresh_layout = None
_scroll_view = None
_dismissable_container = None
_selection = None
def create(self):
# DetailedList is not a specific widget on Android, so we build it out
# of a few pieces.
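# Concretely: a SwipeRefreshLayout (pull-to-refresh) wraps a ScrollView,
# which wraps a vertical LinearLayout holding one RelativeLayout row per
# data item.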
if self.native is None:
self.native = LinearLayout(self._native_activity)
self.native.setOrientation(LinearLayout.VERTICAL)
else:
# If create() is called a second time, clear the widget and regenerate it.
self.native.removeAllViews()
scroll_view = ScrollView(self._native_activity)
self._scroll_view = ScrollView(
__jni__=java.NewGlobalRef(scroll_view))
scroll_view_layout_params = LinearLayout__LayoutParams(
LinearLayout__LayoutParams.MATCH_PARENT,
LinearLayout__LayoutParams.MATCH_PARENT
)
scroll_view_layout_params.gravity = Gravity.TOP
swipe_refresh_wrapper = SwipeRefreshLayout(self._native_activity)
swipe_refresh_wrapper.setOnRefreshListener(OnRefreshListener(self.interface))
self._swipe_refresh_layout = SwipeRefreshLayout(
__jni__=java.NewGlobalRef(swipe_refresh_wrapper))
swipe_refresh_wrapper.addView(scroll_view)
self.native.addView(swipe_refresh_wrapper, scroll_view_layout_params)
dismissable_container = LinearLayout(self._native_activity)
self._dismissable_container = LinearLayout(
__jni__=java.NewGlobalRef(dismissable_container)
)
dismissable_container.setOrientation(LinearLayout.VERTICAL)
dismissable_container_params = LinearLayout__LayoutParams(
LinearLayout__LayoutParams.MATCH_PARENT,
LinearLayout__LayoutParams.MATCH_PARENT
)
scroll_view.addView(
dismissable_container, dismissable_container_params
)
for i in range(len(self.interface.data or [])):
self._make_row(dismissable_container, i)
def _make_row(self, container, i):
# Create the foreground.
row_foreground = RelativeLayout(self._native_activity)
container.addView(row_foreground)
# Add user-provided icon to layout.
icon_image_view = ImageView(self._native_activity)
icon = self.interface.data[i].icon
if icon is not None:
icon.bind(self.interface.factory)
bitmap = BitmapFactory.decodeFile(str(icon._impl.path))
icon_image_view.setImageBitmap(bitmap)
icon_layout_params = RelativeLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.WRAP_CONTENT)
icon_layout_params.width = 150
icon_layout_params.setMargins(25, 0, 25, 0)
icon_layout_params.height = self.ROW_HEIGHT
icon_image_view.setScaleType(ImageView__ScaleType.FIT_CENTER)
row_foreground.addView(icon_image_view, icon_layout_params)
# Create layout to show top_text and bottom_text.
text_container = LinearLayout(self._native_activity)
text_container_params = RelativeLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.WRAP_CONTENT)
text_container_params.height = self.ROW_HEIGHT
text_container_params.setMargins(25 + 25 + 150, 0, 0, 0) | row_foreground.addView(text_container, text_container_params)
text_container.setOrientation(LinearLayout.VERTICAL)
text_container.setWeightSum(2.0)
# Create top & bottom text; add them to layout.
top_text = TextView(self._native_activity)
top_text.setText(str(getattr(self.interface.data[i], 'title', '')))
top_text.setTextSize(20.0)
top_text.setTextColor(self._native_activity.getResources().getColor(R__color.black))
bottom_text = TextView(self._native_activity)
bottom_text.setTextColor(self._native_activity.getResources().getColor(R__color.black))
bottom_text.setText(str(getattr(self.interface.data[i], 'subtitle', '')))
bottom_text.setTextSize(16.0)
top_text_params = LinearLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.MATCH_PARENT)
top_text_params.weight = 1.0
top_text.setGravity(Gravity.BOTTOM)
text_container.addView(top_text, top_text_params)
bottom_text_params = LinearLayout__LayoutParams(
RelativeLayout__LayoutParams.WRAP_CONTENT,
RelativeLayout__LayoutParams.MATCH_PARENT)
bottom_text_params.weight = 1.0
bottom_text.setGravity(Gravity.TOP)
bottom_text_params.gravity = Gravity.TOP
text_container.addView(bottom_text, bottom_text_params)
# Apply an onclick listener so that clicking anywhere on the row triggers Toga's on_select(row).
row_foreground.setOnClickListener(DetailedListOnClickListener(self, i))
def change_source(self, source):
# If the source changes, re-build the widget.
self.create()
def set_on_refresh(self, handler):
# No special handling needed.
pass
def after_on_refresh(self):
if self._swipe_refresh_layout:
self._swipe_refresh_layout.setRefreshing(False)
def insert(self, index, item):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def change(self, item):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def remove(self, index, item):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def clear(self):
# If the data changes, re-build the widget. Brutally effective.
self.create()
def get_selection(self):
return self._selection
def set_on_select(self, handler):
# No special handling required.
pass
def set_on_delete(self, handler):
# This widget currently does not implement event handlers for data change.
self.interface.factory.not_implemented("DetailedList.set_on_delete()")
def scroll_to_row(self, row):
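# Post the scroll through an Android Handler so it runs on the UI
# thread's message queue.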
def scroll():
row_obj = self._dismissable_container.getChildAt(row)
hit_rect = Rect()
row_obj.getHitRect(hit_rect)
self._scroll_view.requestChildRectangleOnScreen(
self._dismissable_container,
hit_rect,
False,
)
Handler().post(PythonRunnable(scroll))
def rehint(self):
# Android can crash when rendering some widgets until they have their layout params set. Guard for that case.
if self.native.getLayoutParams() is None:
return
self.native.measure(
View__MeasureSpec.UNSPECIFIED,
View__MeasureSpec.UNSPECIFIED,
)
self.interface.intrinsic.width = at_least(self.native.getMeasuredWidth())
self.interface.intrinsic.height = self.native.getMeasuredHeight() | |
ipc.go | // Copyright (c) 2016 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package main
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"os"
)
// Messages sent over a pipe are encoded using a simple binary message format:
//
// - Protocol version (1 byte, currently 1)
// - Message type length (1 byte)
// - Message type string (encoded as UTF8, no longer than 255 bytes)
// - Message payload length (4 bytes, little endian)
// - Message payload bytes (no longer than 2^32 - 1 bytes)
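//
// For example, a "lifetimeevent" message (defined below) carrying the
// two-byte payload {2, 0} (the database shutdown event) is encoded as:
//
//   01                                      protocol version
//   0d                                      type length (13)
//   6c 69 66 65 74 69 6d 65 65 76 65 6e 74  "lifetimeevent"
//   02 00 00 00                             payload length (little endian)
//   02 00                                   payload (event, action)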
type pipeMessage interface {
Type() string
PayloadSize() uint32
WritePayload(w io.Writer) error
}
var outgoingPipeMessages = make(chan pipeMessage)
// serviceControlPipeRx reads from the file descriptor fd of a read end pipe.
// This is intended to be used as a simple control mechanism for parent
// processes to communicate with and manage the lifetime of an hcd child
// process using a unidirectional pipe (on Windows, this is an anonymous pipe,
// not a named pipe).
//
// When the pipe is closed or any other errors occur reading the control
// message, shutdown begins. This prevents hcd from continuing to run
// unsupervised after the parent process closes unexpectedly.
//
// No control messages are currently defined and the only use for the pipe is to
// start clean shutdown when the pipe is closed. Control messages that follow
// the pipe message format can be added later as needed.
func serviceControlPipeRx(fd uintptr) {
pipe := os.NewFile(fd, fmt.Sprintf("|%v", fd))
r := bufio.NewReader(pipe)
for {
_, err := r.Discard(1024)
if err == io.EOF {
err = nil
break
}
if err != nil {
hcdLog.Errorf("Failed to read from pipe: %v", err)
break
}
}
select {
case shutdownRequestChannel <- struct{}{}:
default:
}
}
// serviceControlPipeTx sends pipe messages to the file descriptor fd of a write
// end pipe. This is intended to be a simple response and notification system
// for a child hcd process to communicate with a parent process without the
// need to go through the RPC server.
//
// See the comment on the pipeMessage interface for the binary encoding of a
// pipe message.
func serviceControlPipeTx(fd uintptr) {
defer drainOutgoingPipeMessages()
pipe := os.NewFile(fd, fmt.Sprintf("|%v", fd))
w := bufio.NewWriter(pipe)
headerBuffer := make([]byte, 0, 1+1+255+4) // capped to max header size
var err error
for m := range outgoingPipeMessages {
const protocolVersion byte = 1
mtype := m.Type()
psize := m.PayloadSize()
headerBuffer = append(headerBuffer, protocolVersion)
headerBuffer = append(headerBuffer, byte(len(mtype)))
headerBuffer = append(headerBuffer, mtype...)
buf := make([]byte, 4)
binary.LittleEndian.PutUint32(buf, psize)
headerBuffer = append(headerBuffer, buf...)
_, err = w.Write(headerBuffer)
if err != nil {
break
}
err = m.WritePayload(w)
if err != nil {
break
}
err = w.Flush()
if err != nil {
break
}
headerBuffer = headerBuffer[:0]
}
hcdLog.Errorf("Failed to write to pipe: %v", err)
}
func drainOutgoingPipeMessages() {
for range outgoingPipeMessages {
}
}
// The lifetimeEvent describes a startup or shutdown event. The message type
// string is "lifetimeevent".
//
// The payload size is always 2 bytes long. The first byte describes whether a
// service or event is about to run or whether startup has completed. The
// second byte, when applicable, describes which event or service is about to
// start or stop.
//
// 0 <event id>: The startup event is about to run
// 1 <ignored>: All startup tasks have completed
// 2 <event id>: The shutdown event is about to run
//
// Event IDs can take on the following values:
//
// 0: Database opening/closing
// 1: Ticket database opening/closing
// 2: Peer-to-peer server starting/stopping
//
// Note that not all subsystems are started/stopped or events run during the
// program's lifetime depending on what features are enabled through the config.
//
// As an example, the following messages may be sent during a typical execution:
//
// 0 0: The database is being opened
// 0 1: The ticket DB is being opened
// 0 2: The P2P server is starting
// 1 0: All startup tasks have completed
// 2 2: The P2P server is stopping
// 2 1: The ticket DB is being closed and written to disk
// 2 0: The database is being closed
type lifetimeEvent struct {
event lifetimeEventID
action lifetimeAction
}
var _ pipeMessage = (*lifetimeEvent)(nil)
type lifetimeEventID byte
const (
startupEvent lifetimeEventID = iota
startupComplete
shutdownEvent
)
type lifetimeAction byte
const (
lifetimeEventDBOpen lifetimeAction = iota
lifetimeEventP2PServer
)
func (*lifetimeEvent) Type() string { return "lifetimeevent" }
func (e *lifetimeEvent) PayloadSize() uint32 { return 2 }
func (e *lifetimeEvent) WritePayload(w io.Writer) error {
_, err := w.Write([]byte{byte(e.event), byte(e.action)})
return err
}
type lifetimeEventServer chan<- pipeMessage
func newLifetimeEventServer(outChan chan<- pipeMessage) lifetimeEventServer |
func (s lifetimeEventServer) notifyStartupEvent(action lifetimeAction) {
if s == nil {
return
}
s <- &lifetimeEvent{
event: startupEvent,
action: action,
}
}
func (s lifetimeEventServer) notifyStartupComplete() {
if s == nil {
return
}
s <- &lifetimeEvent{
event: startupComplete,
action: 0,
}
}
func (s lifetimeEventServer) notifyShutdownEvent(action lifetimeAction) {
if s == nil {
return
}
s <- &lifetimeEvent{
event: shutdownEvent,
action: action,
}
}
| {
return lifetimeEventServer(outChan)
} |
main.go | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log"
"net"
"net/http"
"net/http/pprof"
"os"
"sort"
"strconv"
"strings"
"github.com/golang/glog"
"github.com/openshift/origin/pkg/util/proc"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
kcollectors "k8s.io/kube-state-metrics/collectors"
"k8s.io/kube-state-metrics/version"
)
const (
metricsPath = "/metrics"
healthzPath = "/healthz"
)
var (
defaultCollectors = collectorSet{
"daemonsets": struct{}{},
"deployments": struct{}{},
"limitranges": struct{}{},
"nodes": struct{}{},
"pods": struct{}{},
"replicasets": struct{}{},
"replicationcontrollers": struct{}{},
"resourcequotas": struct{}{},
"services": struct{}{},
"jobs": struct{}{},
"cronjobs": struct{}{},
"statefulsets": struct{}{},
"persistentvolumes": struct{}{},
"persistentvolumeclaims": struct{}{},
"namespaces": struct{}{},
"horizontalpodautoscalers": struct{}{},
"endpoints": struct{}{},
"secrets": struct{}{},
"configmaps": struct{}{},
}
availableCollectors = map[string]func(registry prometheus.Registerer, kubeClient clientset.Interface, namespace string){
"cronjobs": kcollectors.RegisterCronJobCollector,
"daemonsets": kcollectors.RegisterDaemonSetCollector,
"deployments": kcollectors.RegisterDeploymentCollector,
"jobs": kcollectors.RegisterJobCollector,
"limitranges": kcollectors.RegisterLimitRangeCollector,
"nodes": kcollectors.RegisterNodeCollector,
"pods": kcollectors.RegisterPodCollector,
"replicasets": kcollectors.RegisterReplicaSetCollector,
"replicationcontrollers": kcollectors.RegisterReplicationControllerCollector,
"resourcequotas": kcollectors.RegisterResourceQuotaCollector,
"services": kcollectors.RegisterServiceCollector,
"statefulsets": kcollectors.RegisterStatefulSetCollector,
"persistentvolumes": kcollectors.RegisterPersistentVolumeCollector,
"persistentvolumeclaims": kcollectors.RegisterPersistentVolumeClaimCollector,
"namespaces": kcollectors.RegisterNamespaceCollector,
"horizontalpodautoscalers": kcollectors.RegisterHorizontalPodAutoScalerCollector,
"endpoints": kcollectors.RegisterEndpointCollector,
"secrets": kcollectors.RegisterSecretCollector,
"configmaps": kcollectors.RegisterConfigMapCollector,
}
)
// promLogger implements promhttp.Logger
type promLogger struct{}
func (pl promLogger) Println(v ...interface{}) {
glog.Error(v...)
}
type collectorSet map[string]struct{}
func (c *collectorSet) String() string {
s := *c
ss := s.asSlice()
sort.Strings(ss)
return strings.Join(ss, ",")
}
func (c *collectorSet) Set(value string) error {
s := *c
cols := strings.Split(value, ",")
for _, col := range cols {
_, ok := availableCollectors[col]
if !ok {
glog.Fatalf("Collector \"%s\" does not exist", col)
}
s[col] = struct{}{}
}
return nil
}
func (c collectorSet) asSlice() []string {
cols := []string{}
for col := range c {
cols = append(cols, col)
}
return cols
}
func (c collectorSet) isEmpty() bool {
return len(c.asSlice()) == 0
}
func (c *collectorSet) Type() string {
return "string"
}
type options struct {
apiserver string
kubeconfig string
help bool
port int
host string
telemetryPort int
telemetryHost string
collectors collectorSet
namespace string
version bool
}
func main() {
options := &options{collectors: make(collectorSet)}
flags := pflag.NewFlagSet("", pflag.ExitOnError)
// add glog flags
flags.AddGoFlagSet(flag.CommandLine)
flags.Lookup("logtostderr").Value.Set("true")
flags.Lookup("logtostderr").DefValue = "true"
flags.Lookup("logtostderr").NoOptDefVal = "true"
flags.StringVar(&options.apiserver, "apiserver", "", `The URL of the apiserver to use as a master`)
flags.StringVar(&options.kubeconfig, "kubeconfig", "", "Absolute path to the kubeconfig file")
flags.BoolVarP(&options.help, "help", "h", false, "Print help text")
flags.IntVar(&options.port, "port", 80, `Port to expose metrics on.`)
flags.StringVar(&options.host, "host", "0.0.0.0", `Host to expose metrics on.`)
flags.IntVar(&options.telemetryPort, "telemetry-port", 81, `Port to expose kube-state-metrics self metrics on.`)
flags.StringVar(&options.telemetryHost, "telemetry-host", "0.0.0.0", `Host to expose kube-state-metrics self metrics on.`)
flags.Var(&options.collectors, "collectors", fmt.Sprintf("Comma-separated list of collectors to be enabled. Defaults to %q", &defaultCollectors))
flags.StringVar(&options.namespace, "namespace", metav1.NamespaceAll, "Namespace to collect resources from (defaults to all namespaces)")
flags.BoolVarP(&options.version, "version", "", false, "kube-state-metrics build version information")
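// Example invocation (values are illustrative):
//   kube-state-metrics --kubeconfig=$HOME/.kube/config --collectors=pods,nodes --port=8080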
flags.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
flags.PrintDefaults()
}
err := flags.Parse(os.Args)
if err != nil {
glog.Fatalf("Error: %s", err)
}
if options.version {
fmt.Printf("%#v\n", version.GetVersion())
os.Exit(0)
}
if options.help {
flags.Usage()
os.Exit(0)
}
var collectors collectorSet
if len(options.collectors) == 0 {
glog.Info("Using default collectors")
collectors = defaultCollectors
} else {
collectors = options.collectors
}
if options.namespace == metav1.NamespaceAll {
glog.Info("Using all namespace")
} else {
glog.Infof("Using %s namespace", options.namespace)
}
proc.StartReaper()
kubeClient, err := createKubeClient(options.apiserver, options.kubeconfig)
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
}
ksmMetricsRegistry := prometheus.NewRegistry()
ksmMetricsRegistry.Register(kcollectors.ResourcesPerScrapeMetric)
ksmMetricsRegistry.Register(kcollectors.ScrapeErrorTotalMetric)
ksmMetricsRegistry.Register(prometheus.NewProcessCollector(os.Getpid(), ""))
ksmMetricsRegistry.Register(prometheus.NewGoCollector())
go telemetryServer(ksmMetricsRegistry, options.telemetryHost, options.telemetryPort)
registry := prometheus.NewRegistry()
registerCollectors(registry, kubeClient, collectors, options.namespace)
metricsServer(registry, options.host, options.port)
}
func createKubeClient(apiserver string, kubeconfig string) (clientset.Interface, error) {
config, err := clientcmd.BuildConfigFromFlags(apiserver, kubeconfig)
if err != nil {
return nil, err
}
kubeClient, err := clientset.NewForConfig(config)
if err != nil {
return nil, err
}
// Informers don't seem to do a good job logging error messages when they
// can't reach the server, making debugging hard. This makes it easier to
// figure out if the apiserver is configured incorrectly.
glog.Infof("Testing communication with server")
v, err := kubeClient.Discovery().ServerVersion()
if err != nil {
return nil, fmt.Errorf("ERROR communicating with apiserver: %v", err)
}
glog.Infof("Running with Kubernetes cluster version: v%s.%s. git version: %s. git tree state: %s. commit: %s. platform: %s",
v.Major, v.Minor, v.GitVersion, v.GitTreeState, v.GitCommit, v.Platform)
glog.Infof("Communication with server successful")
return kubeClient, nil
}
func telemetryServer(registry prometheus.Gatherer, host string, port int) {
// Address to listen on for web interface and telemetry
listenAddress := net.JoinHostPort(host, strconv.Itoa(port))
glog.Infof("Starting kube-state-metrics self metrics server: %s", listenAddress)
mux := http.NewServeMux()
// Add metricsPath
mux.Handle(metricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorLog: promLogger{}}))
// Add index
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head><title>Kube-State-Metrics Metrics Server</title></head>
<body>
<h1>Kube-State-Metrics Metrics</h1>
<ul>
<li><a href='` + metricsPath + `'>metrics</a></li>
</ul>
</body>
</html>`))
})
log.Fatal(http.ListenAndServe(listenAddress, mux))
}
func metricsServer(registry prometheus.Gatherer, host string, port int) {
// Address to listen on for web interface and telemetry
listenAddress := net.JoinHostPort(host, strconv.Itoa(port))
glog.Infof("Starting metrics server: %s", listenAddress)
mux := http.NewServeMux()
mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
// Add metricsPath
mux.Handle(metricsPath, promhttp.HandlerFor(registry, promhttp.HandlerOpts{ErrorLog: promLogger{}}))
// Add healthzPath
mux.HandleFunc(healthzPath, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(200)
w.Write([]byte("ok"))
})
// Add index
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(`<html>
<head><title>Kube Metrics Server</title></head>
<body>
<h1>Kube Metrics</h1>
<ul>
<li><a href='` + metricsPath + `'>metrics</a></li>
<li><a href='` + healthzPath + `'>healthz</a></li>
</ul>
</body>
</html>`))
})
log.Fatal(http.ListenAndServe(listenAddress, mux))
}
// registerCollectors creates and starts informers and initializes and
// registers metrics for collection.
func registerCollectors(registry prometheus.Registerer, kubeClient clientset.Interface, enabledCollectors collectorSet, namespace string) | {
activeCollectors := []string{}
for c := range enabledCollectors {
f, ok := availableCollectors[c]
if ok {
f(registry, kubeClient, namespace)
activeCollectors = append(activeCollectors, c)
}
}
glog.Infof("Active collectors: %s", strings.Join(activeCollectors, ","))
} |
|
userType.ts | export class userType{
id!:number;
type!:string; | } |
|
_version.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
VERSION = "27.1.0" | ||
spritesheet.go | package common
import (
"log"
"engo.io/engo"
"engo.io/gl"
"github.com/ilackarms/sprite-locator/models"
"encoding/json"
"image"
"github.com/engoengine/math"
)
// sort sprites into rows, starting with top left, and going down row by row
func sortSprites(sprites []models.Sprite) []models.Sprite {
sorted := []models.Sprite{}
origin := image.Pt(0, 0)
topLeft := sprites[0]
minDist := distance(origin, center(topLeft))
//find topleft sprite
for _, sprite := range sprites {
dist := distance(origin, center(sprite))
if dist < minDist {
topLeft = sprite
minDist = dist
}
}
//next sprite = closest in X to top left, lowest value of Y
sprite0 := center(topLeft)
for _, sprite := range sprites {
dist := float32(center(sprite).X - sprite0.X)
if dist < minDist {
topLeft = sprite
minDist = dist
}
}
return sorted
}
func center(sprite models.Sprite) image.Point {
return image.Point{
X: (sprite.Min.X + sprite.Max.X)/2,
Y: (sprite.Min.Y + sprite.Max.Y)/2,
}
}
func distance(p1, p2 image.Point) float32 {
return math.Sqrt(math.Pow(float32(p2.X-p1.X), 2) + math.Pow(float32(p2.Y-p1.Y), 2))
}
// Spritesheet is a class that stores a set of tiles from a file, used by tilemaps and animations
type Spritesheet struct {
texture *gl.Texture // The original texture
width, height float32 // The dimensions of the total texture
Sprites []models.Sprite
cache map[int]Texture // The cell cache cells
}
func NewSpritesheetFromTexture(tr *TextureResource, metadata *TextResource) *Spritesheet {
var spriteMetadata models.Spritesheet
if err := json.Unmarshal([]byte(metadata.Text), &spriteMetadata); err != nil {
log.Println("[WARNING] [NewSpritesheetFromFile]: Unmarshalling json from ", metadata.URL(), ": ", err)
return nil
}
return &Spritesheet{texture: tr.Texture,
width: tr.Width, height: tr.Height,
Sprites: spriteMetadata.Sprites,
cache: make(map[int]Texture),
}
}
// NewSpritesheetFromFile is a simple handler for creating a new spritesheet from a file
// textureName is the name of a texture already preloaded with engo.Files.Add
func NewSpritesheetFromFile(textureName, textName string) *Spritesheet {
res, err := engo.Files.Resource(textureName)
if err != nil {
log.Println("[WARNING] [NewSpritesheetFromFile]: Received error:", err)
return nil
}
img, ok := res.(TextureResource)
if !ok {
log.Println("[WARNING] [NewSpritesheetFromFile]: Resource not of type `TextureResource`:", textureName)
return nil
}
res, err = engo.Files.Resource(textName)
if err != nil {
log.Println("[WARNING] [NewSpritesheetFromFile]: Received error:", err)
return nil
}
txt, ok := res.(TextResource)
if !ok |
return NewSpritesheetFromTexture(&img, &txt)
}
// Cell gets the region at the index i, updates and pulls from cache if need be
func (s *Spritesheet) Cell(index int) Texture {
if r, ok := s.cache[index]; ok {
return r
}
x0 := float32(s.Sprites[index].Min.X)
y0 := float32(s.Sprites[index].Min.Y)
x1 := float32(s.Sprites[index].Max.X)
y1 := float32(s.Sprites[index].Max.Y)
s.cache[index] = Texture{
id: s.texture,
width: x1 - x0,
height: y1 - y0,
viewport: engo.AABB{
engo.Point{
X: x0,
Y: y0,
},
engo.Point{
X: x1,
Y: y1,
},
}}
return s.cache[index]
}
func (s *Spritesheet) Drawable(index int) Drawable {
return s.Cell(index)
}
func (s *Spritesheet) Drawables() []Drawable {
drawables := make([]Drawable, s.CellCount())
for i := 0; i < s.CellCount(); i++ {
drawables[i] = s.Drawable(i)
}
return drawables
}
func (s *Spritesheet) CellCount() int {
return len(s.Sprites)
}
func (s *Spritesheet) Cells() []Texture {
cellsNo := s.CellCount()
cells := make([]Texture, cellsNo)
for i := 0; i < cellsNo; i++ {
cells[i] = s.Cell(i)
}
return cells
}
/*
type Sprite struct {
Position *Point
Scale *Point
Anchor *Point
Rotation float32
Color color.Color
Alpha float32
Region *Region
}
func NewSprite(region *Region, x, y float32) *Sprite {
return &Sprite{
Position: &Point{x, y},
Scale: &Point{1, 1},
Anchor: &Point{0, 0},
Rotation: 0,
Color: color.White,
Alpha: 1,
Region: region,
}
}
*/
| {
log.Println("[WARNING] [NewSpritesheetFromFile]: Resource not of type `TextResource`:", textureName)
return nil
} |
style.ts | import styled from 'styled-components';
export const EmptyStateWrapper = styled.div`
display: flex;
justify-content: center;
align-items: center;
flex-flow: column;
width: 100%;
height: 100%;
padding: 20px;
animation: fadein 0.3s;
@keyframes fadein {
from {
opacity: 0;
}
to {
opacity: 1; | }
`; | } |
generic.rs | extern crate packed_simd;
#[cfg(not(feature = "runtime-dispatch-simd"))]
use core::mem;
#[cfg(feature = "runtime-dispatch-simd")]
use std::mem;
use self::packed_simd::{u8x32, u8x64, FromCast};
const MASK: [u8; 64] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
];
unsafe fn u8x64_from_offset(slice: &[u8], offset: usize) -> u8x64 {
u8x64::from_slice_unaligned_unchecked(slice.get_unchecked(offset..))
}
unsafe fn u8x32_from_offset(slice: &[u8], offset: usize) -> u8x32 {
u8x32::from_slice_unaligned_unchecked(slice.get_unchecked(offset..))
}
fn sum_x64(u8s: &u8x64) -> usize {
let mut store = [0; mem::size_of::<u8x64>()];
u8s.write_to_slice_unaligned(&mut store);
store.iter().map(|&e| e as usize).sum()
}
fn sum_x32(u8s: &u8x32) -> usize {
let mut store = [0; mem::size_of::<u8x32>()];
u8s.write_to_slice_unaligned(&mut store);
store.iter().map(|&e| e as usize).sum() |
unsafe {
let mut offset = 0;
let mut count = 0;
let needles_x64 = u8x64::splat(needle);
// Process 64*255 = 16320 bytes per block; each u8 lane can accumulate at
// most 255 matches before it would overflow.
while haystack.len() >= offset + 64 * 255 {
let mut counts = u8x64::splat(0);
for _ in 0..255 {
counts -= u8x64::from_cast(u8x64_from_offset(haystack, offset).eq(needles_x64));
offset += 64;
}
count += sum_x64(&counts);
}
// Handle one more 64*128 = 8192 byte block if enough input remains.
if haystack.len() >= offset + 64 * 128 {
let mut counts = u8x64::splat(0);
for _ in 0..128 {
counts -= u8x64::from_cast(u8x64_from_offset(haystack, offset).eq(needles_x64));
offset += 64;
}
count += sum_x64(&counts);
}
let needles_x32 = u8x32::splat(needle);
// Count the remainder in 32-byte chunks.
let mut counts = u8x32::splat(0);
for i in 0..(haystack.len() - offset) / 32 {
counts -= u8x32::from_cast(u8x32_from_offset(haystack, offset + i * 32).eq(needles_x32));
}
if haystack.len() % 32 != 0 {
counts -= u8x32::from_cast(u8x32_from_offset(haystack, haystack.len() - 32).eq(needles_x32)) &
u8x32_from_offset(&MASK, haystack.len() % 32);
}
count += sum_x32(&counts);
count
}
}
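// In UTF-8, continuation bytes have the bit pattern 10xxxxxx, so a byte is
// the leading byte of a character exactly when (b & 0b1100_0000) != 0b1000_0000.
// The helpers below evaluate that predicate lane-wise over SIMD vectors.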
fn is_leading_utf8_byte_x64(u8s: u8x64) -> u8x64 {
u8x64::from_cast((u8s & u8x64::splat(0b1100_0000)).ne(u8x64::splat(0b1000_0000)))
}
fn is_leading_utf8_byte_x32(u8s: u8x32) -> u8x32 {
u8x32::from_cast((u8s & u8x32::splat(0b1100_0000)).ne(u8x32::splat(0b1000_0000)))
}
pub fn chunk_num_chars(utf8_chars: &[u8]) -> usize {
assert!(utf8_chars.len() >= 32);
unsafe {
let mut offset = 0;
let mut count = 0;
// Process 64*255 = 16320 bytes per block; each u8 lane can accumulate at
// most 255 counts before it would overflow.
while utf8_chars.len() >= offset + 64 * 255 {
let mut counts = u8x64::splat(0);
for _ in 0..255 {
counts -= is_leading_utf8_byte_x64(u8x64_from_offset(utf8_chars, offset));
offset += 64;
}
count += sum_x64(&counts);
}
// Handle one more 64*128 = 8192 byte block if enough input remains.
if utf8_chars.len() >= offset + 64 * 128 {
let mut counts = u8x64::splat(0);
for _ in 0..128 {
counts -= is_leading_utf8_byte_x64(u8x64_from_offset(utf8_chars, offset));
offset += 64;
}
count += sum_x64(&counts);
}
// Count the remainder in 32-byte chunks.
let mut counts = u8x32::splat(0);
for i in 0..(utf8_chars.len() - offset) / 32 {
counts -= is_leading_utf8_byte_x32(u8x32_from_offset(utf8_chars, offset + i * 32));
}
if utf8_chars.len() % 32 != 0 {
counts -= is_leading_utf8_byte_x32(u8x32_from_offset(utf8_chars, utf8_chars.len() - 32)) &
u8x32_from_offset(&MASK, utf8_chars.len() % 32);
}
count += sum_x32(&counts);
count
}
} | }
pub fn chunk_count(haystack: &[u8], needle: u8) -> usize {
assert!(haystack.len() >= 32); |
TaobaoLifeReservationItemOrderChangeRequest.go | package trade
import (
"net/url"
"github.com/bububa/opentaobao/model"
)
/*
Externally initiated change to a local-services post-purchase reservation order APIRequest
taobao.life.reservation.item.order.change
Externally initiated changes to a local-services post-purchase reservation order, e.g. rescheduling or cancellation. In the current medical-checkup scenario, users contact the ISV directly to reschedule or cancel, so this capability is opened up to ISVs.
*/
type TaobaoLifeReservationItemOrderChangeRequest struct {
model.Params
// Taobao main order number
tradeNo string
// Ticket (voucher) ID
ticketId string
// Reschedule: MODIFY; cancel: CANCEL
action string
// Required for rescheduling; format: yyyy-MM-dd HH:mm, with the time fixed at 00:00
reserveStartTime string
// Required for rescheduling; format: yyyy-MM-dd HH:mm, with the time fixed at 23:59
reserveEndTime string
}
func NewTaobaoLifeReservationItemOrderChangeRequest() *TaobaoLifeReservationItemOrderChangeRequest{
return &TaobaoLifeReservationItemOrderChangeRequest{
Params: model.NewParams(),
}
}
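// Usage sketch (values are illustrative):
//
//   req := NewTaobaoLifeReservationItemOrderChangeRequest()
//   req.SetTradeNo("123456789")
//   req.SetTicketId("TK0001")
//   req.SetAction("MODIFY")
//   req.SetReserveStartTime("2021-06-01 00:00")
//   req.SetReserveEndTime("2021-06-01 23:59")
//   params := req.GetApiParams()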
func (r Ta | ApiMethodName() string {
return "taobao.life.reservation.item.order.change"
}
func (r TaobaoLifeReservationItemOrderChangeRequest) GetApiParams() url.Values {
params := url.Values{}
for k, v := range r.GetRawParams() {
params.Set(k, v.String())
}
return params
}
func (r *TaobaoLifeReservationItemOrderChangeRequest) SetTradeNo(tradeNo string) error {
r.tradeNo = tradeNo
r.Set("trade_no", tradeNo)
return nil
}
func (r TaobaoLifeReservationItemOrderChangeRequest) GetTradeNo() string {
return r.tradeNo
}
func (r *TaobaoLifeReservationItemOrderChangeRequest) SetTicketId(ticketId string) error {
r.ticketId = ticketId
r.Set("ticket_id", ticketId)
return nil
}
func (r TaobaoLifeReservationItemOrderChangeRequest) GetTicketId() string {
return r.ticketId
}
func (r *TaobaoLifeReservationItemOrderChangeRequest) SetAction(action string) error {
r.action = action
r.Set("action", action)
return nil
}
func (r TaobaoLifeReservationItemOrderChangeRequest) GetAction() string {
return r.action
}
func (r *TaobaoLifeReservationItemOrderChangeRequest) SetReserveStartTime(reserveStartTime string) error {
r.reserveStartTime = reserveStartTime
r.Set("reserve_start_time", reserveStartTime)
return nil
}
func (r TaobaoLifeReservationItemOrderChangeRequest) GetReserveStartTime() string {
return r.reserveStartTime
}
func (r *TaobaoLifeReservationItemOrderChangeRequest) SetReserveEndTime(reserveEndTime string) error {
r.reserveEndTime = reserveEndTime
r.Set("reserve_end_time", reserveEndTime)
return nil
}
func (r TaobaoLifeReservationItemOrderChangeRequest) GetReserveEndTime() string {
return r.reserveEndTime
}
| obaoLifeReservationItemOrderChangeRequest) Get |
gui_main.py | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QApplication
from gui_client.gui import MainWindow
import constants
import fire
from gui_client.current_state import CurrentState
from gui_client.gui_utils import ProgressDialog,show_info_dialog
from gui_client.work_thread import InitDictThread
from gui_client.gui_config import GuiConfigs
from gui_client.SettingDialog import SettingDialog
import signal
from constants import configs
#from signal import signal, SIGINT, SIGTERM
#signal(SIGTERM, lambda : exit(0))
#signal(SIGINT,lambda :exit(0))
'''
def show_mainwindow(dicts):
ProgressDialog.hide_progress()
if not dicts or dicts['status_code']!=0:
QtWidgets.QMessageBox.critical(ex,"Error",
"It seems the mmdict daemon is not running."
" Please first run the daemon. Click OK to exit.")
exit(1)
#msgBox.setWindowTitle("Error")
#msgBox.setText("It seems the mmdict daemon is not running. Please first run the daemon. "
# "Click OK to exit.")
#msgBox.buttonClicked.connect(lambda x: exit(1))
#msgBox.
else:
CurrentState.set_dict_infos(dicts['results'])
ex.show()
initThread=InitDictThread()
initThread.result_ready.connect(show_mainwindow)
initThread.finished.connect(initThread.deleteLater)
def run_gui():
ProgressDialog.show_progress(None,"Init dicts...")
initThread.start()
'''
app = QApplication([])
main_window = MainWindow()
app.aboutToQuit.connect(main_window.closing)
class Main:
def run(self):
main_window.show()
app.exec()
#def __run(self,dict_host=None,dict_port=None,http_host=None,http_port=None):
# if dict_host:
# configs.set_server_value(GuiConfigs.DICT_HOST,dict_host) | # if dict_port:
# configs.set_server_value(GuiConfigs.DICT_PORT,dict_port)
# if http_host:
# configs.set_server_value(GuiConfigs.HTTP_HOST,http_host)
# if http_port:
# configs.set_server_value(GuiConfigs.HTTP_PORT,http_port)
# run_gui()
if __name__ == '__main__':
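# Restore the default SIGINT action so Ctrl+C terminates the app even
# while the Qt event loop is running.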
signal.signal(signal.SIGINT, signal.SIG_DFL)
fire.Fire(Main) | |
from_meta.rs | use std::borrow::Cow;
use std::cell::RefCell;
use std::collections::hash_map::HashMap;
use std::collections::HashSet;
use std::hash::BuildHasher;
use std::rc::Rc;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use syn::{Expr, Lit, Meta, NestedMeta};
use crate::{Error, Result};
/// Create an instance from an item in an attribute declaration.
///
/// # Implementing `FromMeta`
/// * Do not take a dependency on the `ident` of the passed-in meta item. The ident will be set by the field name of the containing struct.
/// * Implement only the `from_*` methods that you intend to support. The default implementations will return useful errors.
///
/// # Provided Implementations
/// ## bool
///
/// * Word with no value specified - becomes `true`.
/// * As a boolean literal, e.g. `foo = true`.
/// * As a string literal, e.g. `foo = "true"`.
///
/// ## char
/// * As a char literal, e.g. `foo = '#'`.
/// * As a string literal consisting of a single character, e.g. `foo = "#"`.
///
/// ## String
/// * As a string literal, e.g. `foo = "hello"`.
/// * As a raw string literal, e.g. `foo = r#"hello "world""#`.
///
/// ## Number
/// * As a string literal, e.g. `foo = "-25"`.
/// * As an unquoted positive value, e.g. `foo = 404`. Negative numbers must be in quotation marks.
///
/// ## ()
/// * Word with no value specified, e.g. `foo`. This is best used with `Option`.
/// See `darling::util::Flag` for a more strongly-typed alternative.
///
/// ## Option
/// * Any format produces `Some`.
///
/// ## `Result<T, darling::Error>`
/// * Allows for fallible parsing; will populate the target field with the result of the
/// parse attempt.
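///
/// # Example
///
/// A minimal sketch of the provided implementations (doc-test ignored;
/// paths assume the `darling` crate):
///
/// ```rust,ignore
/// use darling::FromMeta;
///
/// // `bool` parses from string literals as well as bool literals.
/// assert!(bool::from_string("true").unwrap());
/// // `char` accepts a single-character string literal.
/// assert_eq!(char::from_string("#").unwrap(), '#');
/// ```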
pub trait FromMeta: Sized {
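/// Create an instance from a `syn::NestedMeta`, dispatching literals to
/// `from_value` and meta items to `from_meta`. Any error is annotated with
/// the item's span.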
fn from_nested_meta(item: &NestedMeta) -> Result<Self> {
(match *item {
NestedMeta::Lit(ref lit) => Self::from_value(lit),
NestedMeta::Meta(ref mi) => Self::from_meta(mi),
})
.map_err(|e| e.with_span(item))
}
/// Create an instance from a `syn::Meta` by dispatching to the format-appropriate
/// trait function. This generally should not be overridden by implementers.
///
/// # Error Spans
/// If this method is overridden and can introduce errors that weren't passed up from
/// other `from_meta` calls, the override must call `with_span` on the error using the
/// `item` to make sure that the emitted diagnostic points to the correct location in
/// source code.
fn from_meta(item: &Meta) -> Result<Self> {
(match *item {
Meta::Path(_) => Self::from_word(),
Meta::List(ref value) => Self::from_list(
&value
.nested
.iter()
.cloned()
.collect::<Vec<syn::NestedMeta>>()[..],
),
Meta::NameValue(ref value) => Self::from_value(&value.lit),
})
.map_err(|e| e.with_span(item))
}
/// Create an instance from the presence of the word in the attribute with no
/// additional options specified.
fn from_word() -> Result<Self> {
Err(Error::unsupported_format("word"))
}
/// Create an instance from a list of nested meta items.
#[allow(unused_variables)]
fn from_list(items: &[NestedMeta]) -> Result<Self> {
Err(Error::unsupported_format("list"))
}
/// Create an instance from a literal value of either `foo = "bar"` or `foo("bar")`.
/// This dispatches to the appropriate method based on the type of literal encountered,
/// and generally should not be overridden by implementers.
///
/// # Error Spans
/// If this method is overridden, the override must make sure to add `value`'s span
/// information to the returned error by calling `with_span(value)` on the `Error` instance.
fn from_value(value: &Lit) -> Result<Self> {
(match *value {
Lit::Bool(ref b) => Self::from_bool(b.value),
Lit::Str(ref s) => Self::from_string(&s.value()),
Lit::Char(ref ch) => Self::from_char(ch.value()),
_ => Err(Error::unexpected_lit_type(value)),
})
.map_err(|e| e.with_span(value))
}
/// Create an instance from a char literal in a value position.
#[allow(unused_variables)]
fn from_char(value: char) -> Result<Self> {
Err(Error::unexpected_type("char"))
}
/// Create an instance from a string literal in a value position.
#[allow(unused_variables)]
fn from_string(value: &str) -> Result<Self> {
Err(Error::unexpected_type("string"))
}
/// Create an instance from a bool literal in a value position.
#[allow(unused_variables)]
fn from_bool(value: bool) -> Result<Self> {
Err(Error::unexpected_type("bool"))
}
}
// FromMeta impls for std and syn types.
impl FromMeta for () {
fn from_word() -> Result<Self> {
Ok(())
}
}
impl FromMeta for bool {
fn from_word() -> Result<Self> {
Ok(true)
}
#[allow(clippy::wrong_self_convention)] // false positive
fn from_bool(value: bool) -> Result<Self> {
Ok(value)
}
fn from_string(value: &str) -> Result<Self> {
value.parse().map_err(|_| Error::unknown_value(value))
}
}
impl FromMeta for AtomicBool {
fn from_meta(mi: &Meta) -> Result<Self> {
FromMeta::from_meta(mi)
.map(AtomicBool::new)
.map_err(|e| e.with_span(mi))
}
}
impl FromMeta for char {
#[allow(clippy::wrong_self_convention)] // false positive
fn from_char(value: char) -> Result<Self> {
Ok(value)
}
fn from_string(s: &str) -> Result<Self> {
let mut chars = s.chars();
let char1 = chars.next();
let char2 = chars.next();
if let (Some(char), None) = (char1, char2) {
Ok(char)
} else {
Err(Error::unexpected_type("string"))
}
}
}
impl FromMeta for String {
fn from_string(s: &str) -> Result<Self> {
Ok(s.to_string())
}
}
/// Generate an impl of `FromMeta` that will accept strings which parse to numbers or
/// integer literals.
macro_rules! from_meta_num {
($ty:ident) => {
impl FromMeta for $ty {
fn from_string(s: &str) -> Result<Self> {
s.parse().map_err(|_| Error::unknown_value(s))
}
fn from_value(value: &Lit) -> Result<Self> {
(match *value {
Lit::Str(ref s) => Self::from_string(&s.value()),
Lit::Int(ref s) => Ok(s.base10_parse::<$ty>().unwrap()),
_ => Err(Error::unexpected_lit_type(value)),
})
.map_err(|e| e.with_span(value))
}
}
};
}
from_meta_num!(u8);
from_meta_num!(u16);
from_meta_num!(u32);
from_meta_num!(u64);
from_meta_num!(usize);
from_meta_num!(i8);
from_meta_num!(i16);
from_meta_num!(i32);
from_meta_num!(i64);
from_meta_num!(isize);
/// Generate an impl of `FromMeta` that will accept strings which parse to floats or
/// float literals.
macro_rules! from_meta_float {
($ty:ident) => {
impl FromMeta for $ty {
fn from_string(s: &str) -> Result<Self> {
s.parse().map_err(|_| Error::unknown_value(s))
}
fn from_value(value: &Lit) -> Result<Self> {
(match *value {
Lit::Str(ref s) => Self::from_string(&s.value()),
Lit::Float(ref s) => Ok(s.base10_parse::<$ty>().unwrap()),
_ => Err(Error::unexpected_lit_type(value)),
})
.map_err(|e| e.with_span(value))
}
}
};
}
from_meta_float!(f32);
from_meta_float!(f64);
/// Parsing support for identifiers. This attempts to preserve span information
/// when available, but also supports parsing strings with the call site as the
/// emitted span.
impl FromMeta for syn::Ident {
fn from_string(value: &str) -> Result<Self> {
Ok(syn::Ident::new(value, ::proc_macro2::Span::call_site()))
}
fn from_value(value: &Lit) -> Result<Self> {
if let Lit::Str(ref ident) = *value {
ident
.parse()
.map_err(|_| Error::unknown_lit_str_value(ident))
} else {
Err(Error::unexpected_lit_type(value))
}
}
}
/// Parsing support for punctuated. This attempts to preserve span information
/// when available, but also supports parsing strings with the call site as the
/// emitted span.
impl<T: syn::parse::Parse, P: syn::parse::Parse> FromMeta for syn::punctuated::Punctuated<T, P> {
fn from_value(value: &Lit) -> Result<Self> {
if let Lit::Str(ref ident) = *value {
ident
.parse_with(syn::punctuated::Punctuated::parse_terminated)
.map_err(|_| Error::unknown_lit_str_value(ident))
} else {
Err(Error::unexpected_lit_type(value))
}
}
}
/// Parsing support for an array, i.e. `example = "[1 + 2, 2 - 2, 3 * 4]"`.
impl FromMeta for syn::ExprArray {
fn from_value(value: &Lit) -> Result<Self> {
if let Lit::Str(ref ident) = *value {
ident
.parse::<syn::ExprArray>()
.map_err(|_| Error::unknown_lit_str_value(ident))
} else {
Err(Error::unexpected_lit_type(value))
}
}
}
macro_rules! from_numeric_array {
($ty:ident) => {
/// Parsing an unsigned integer array, i.e. `example = "[1, 2, 3, 4]"`.
impl FromMeta for Vec<$ty> {
fn from_value(value: &Lit) -> Result<Self> {
let expr_array = syn::ExprArray::from_value(value)?;
// To meet rust <1.36 borrow checker rules on expr_array.elems
let v =
expr_array
.elems
.iter()
.map(|expr| match expr {
Expr::Lit(lit) => $ty::from_value(&lit.lit),
_ => Err(Error::custom("Expected array of unsigned integers")
.with_span(expr)),
})
.collect::<Result<Vec<$ty>>>();
v
}
}
};
}
from_numeric_array!(u8);
from_numeric_array!(u16);
from_numeric_array!(u32);
from_numeric_array!(u64);
from_numeric_array!(usize);
/// Parsing support for paths. This attempts to preserve span information when available,
/// but also supports parsing strings with the call site as the emitted span.
impl FromMeta for syn::Path {
fn from_string(value: &str) -> Result<Self> {
syn::parse_str(value).map_err(|_| Error::unknown_value(value))
}
fn from_value(value: &Lit) -> Result<Self> {
if let Lit::Str(ref path_str) = *value {
path_str
.parse()
.map_err(|_| Error::unknown_lit_str_value(path_str))
} else {
Err(Error::unexpected_lit_type(value))
}
}
}
impl FromMeta for syn::Lit {
fn from_value(value: &Lit) -> Result<Self> {
Ok(value.clone())
}
}
macro_rules! from_meta_lit {
($impl_ty:path, $lit_variant:path) => {
impl FromMeta for $impl_ty {
fn from_value(value: &Lit) -> Result<Self> {
if let $lit_variant(ref value) = *value {
Ok(value.clone())
} else {
Err(Error::unexpected_lit_type(value))
}
}
}
};
}
from_meta_lit!(syn::LitInt, Lit::Int);
from_meta_lit!(syn::LitFloat, Lit::Float);
from_meta_lit!(syn::LitStr, Lit::Str);
from_meta_lit!(syn::LitByte, Lit::Byte);
from_meta_lit!(syn::LitByteStr, Lit::ByteStr);
from_meta_lit!(syn::LitChar, Lit::Char);
from_meta_lit!(syn::LitBool, Lit::Bool);
from_meta_lit!(proc_macro2::Literal, Lit::Verbatim);
impl FromMeta for syn::Meta {
fn from_meta(value: &syn::Meta) -> Result<Self> {
Ok(value.clone())
}
}
impl FromMeta for syn::WhereClause {
fn from_string(value: &str) -> Result<Self> {
syn::parse_str(value).map_err(|_| Error::unknown_value(value))
}
}
impl FromMeta for Vec<syn::WherePredicate> {
fn from_string(value: &str) -> Result<Self> {
syn::WhereClause::from_string(&format!("where {}", value))
.map(|c| c.predicates.into_iter().collect())
}
}
impl FromMeta for ident_case::RenameRule {
fn from_string(value: &str) -> Result<Self> {
value.parse().map_err(|_| Error::unknown_value(value))
}
}
impl<T: FromMeta> FromMeta for Option<T> {
fn from_meta(item: &Meta) -> Result<Self> {
FromMeta::from_meta(item).map(Some)
}
}
impl<T: FromMeta> FromMeta for Box<T> {
fn from_meta(item: &Meta) -> Result<Self> {
FromMeta::from_meta(item).map(Box::new)
}
}
impl<T: FromMeta> FromMeta for Result<T> {
fn from_meta(item: &Meta) -> Result<Self> {
Ok(FromMeta::from_meta(item))
}
}
/// Parses the meta-item, and in case of error preserves a copy of the input for
/// later analysis.
impl<T: FromMeta> FromMeta for ::std::result::Result<T, Meta> {
fn from_meta(item: &Meta) -> Result<Self> {
T::from_meta(item)
.map(Ok)
.or_else(|_| Ok(Err(item.clone())))
}
}
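// A minimal illustration (an assumed usage, not code from this crate) of the
// impl above: a value that fails to parse as `T` comes back as `Err(Meta)`, so
// the caller can inspect or re-dispatch the original meta-item later.
//
//     let parsed: ::std::result::Result<u64, Meta> = FromMeta::from_meta(&meta)?;
//     match parsed {
//         Ok(n) => println!("parsed {}", n),
//         Err(original) => stash_for_later(original), // `stash_for_later` is hypothetical
//     }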
impl<T: FromMeta> FromMeta for Rc<T> {
fn from_meta(item: &Meta) -> Result<Self> {
FromMeta::from_meta(item).map(Rc::new)
}
}
impl<T: FromMeta> FromMeta for Arc<T> {
fn from_meta(item: &Meta) -> Result<Self> {
FromMeta::from_meta(item).map(Arc::new)
}
}
impl<T: FromMeta> FromMeta for RefCell<T> {
fn from_meta(item: &Meta) -> Result<Self> {
FromMeta::from_meta(item).map(RefCell::new)
}
}
fn path_to_string(path: &syn::Path) -> String {
path.segments
.iter()
.map(|s| s.ident.to_string())
.collect::<Vec<String>>()
.join("::")
}
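// For example (a sketch): a path written `std::fmt::Debug` renders as the
// string "std::fmt::Debug"; leading-colon and generic-argument information is
// dropped, since only the segment idents are joined.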
/// Trait to convert from a path into an owned key for a map.
trait KeyFromPath: Sized {
fn from_path(path: &syn::Path) -> Result<Self>;
fn to_display(&self) -> Cow<'_, str>;
}
impl KeyFromPath for String {
fn from_path(path: &syn::Path) -> Result<Self> {
Ok(path_to_string(path))
}
fn to_display(&self) -> Cow<'_, str> {
Cow::Borrowed(&self)
}
}
impl KeyFromPath for syn::Path {
fn from_path(path: &syn::Path) -> Result<Self> {
Ok(path.clone())
}
fn to_display(&self) -> Cow<'_, str> {
Cow::Owned(path_to_string(self))
}
}
impl KeyFromPath for syn::Ident {
fn from_path(path: &syn::Path) -> Result<Self> {
if path.segments.len() == 1
&& path.leading_colon.is_none()
&& path.segments[0].arguments.is_empty()
{
Ok(path.segments[0].ident.clone())
} else {
Err(Error::custom("Key must be an identifier").with_span(path))
}
}
fn to_display(&self) -> Cow<'_, str> {
Cow::Owned(self.to_string())
}
}
macro_rules! hash_map {
($key:ty) => {
impl<V: FromMeta, S: BuildHasher + Default> FromMeta for HashMap<$key, V, S> {
fn from_list(nested: &[syn::NestedMeta]) -> Result<Self> {
// Convert the nested meta items into a sequence of (path, value result) result tuples.
// An outer Err means no (key, value) structured could be found, while an Err in the
// second position of the tuple means that value was rejected by FromMeta.
//
// We defer key conversion into $key so that we don't lose span information in the case
// of String keys; we'll need it for good duplicate key errors later.
let pairs = nested
.iter()
.map(|item| -> Result<(&syn::Path, Result<V>)> {
match *item {
syn::NestedMeta::Meta(ref inner) => {
let path = inner.path();
Ok((
path,
FromMeta::from_meta(inner).map_err(|e| e.at_path(&path)),
))
}
syn::NestedMeta::Lit(_) => Err(Error::unsupported_format("literal")),
}
});
let mut errors = vec![];
// We need to track seen keys separately from the final map, since a seen key with an
// Err value won't go into the final map but should trigger a duplicate field error.
//
// This is a set of $key rather than Path to avoid the possibility that a key type
// parses two paths of different values to the same key value.
let mut seen_keys = HashSet::with_capacity(nested.len());
// The map to return in the Ok case. Its size will always be exactly nested.len(),
// since otherwise ≥1 field had a problem and the entire map is dropped immediately
// when the function returns `Err`.
let mut map = HashMap::with_capacity_and_hasher(nested.len(), Default::default());
for item in pairs {
match item {
Ok((path, value)) => {
let key: $key = match KeyFromPath::from_path(path) {
Ok(k) => k,
Err(e) => {
errors.push(e);
// Surface value errors even under invalid keys
if let Err(val_err) = value {
errors.push(val_err);
}
continue;
}
};
let already_seen = seen_keys.contains(&key);
if already_seen {
errors.push(
Error::duplicate_field(&key.to_display()).with_span(path),
);
}
match value {
Ok(_) if already_seen => {}
Ok(val) => {
map.insert(key.clone(), val);
}
Err(e) => {
errors.push(e);
}
}
seen_keys.insert(key);
}
Err(e) => {
errors.push(e);
}
}
}
if !errors.is_empty() {
return Err(Error::multiple(errors));
}
Ok(map)
}
}
};
}
// This is done as a macro rather than a blanket impl to avoid breaking backwards compatibility
// with 0.12.x, while still sharing the same impl.
hash_map!(String);
hash_map!(syn::Ident);
hash_map!(syn::Path);
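// For reference, a sketch (illustrative, not from this crate's docs) of the
// attribute syntax the map impls above accept; keys may be words or paths:
//
//     #[example(hello, world = false)]                // -> HashMap<String, bool>
//     #[example(first = true, the::second = false)]   // -> HashMap<syn::Path, bool>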
/// Tests for `FromMeta` implementations. Wherever the word `ignore` appears in test input,
/// it should not be considered by the parsing.
#[cfg(test)]
mod tests {
use proc_macro2::TokenStream;
use crate::{Error, FromMeta, Result};
    /// Parse a TokenStream as a syn::Meta instance.
fn pm(tokens: TokenStream) -> ::std::result::Result<syn::Meta, String> {
let attribute: syn::Attribute = parse_quote!(#[#tokens]);
attribute.parse_meta().map_err(|_| "Unable to parse".into())
}
fn fm<T: FromMeta>(tokens: TokenStream) -> T {
FromMeta::from_meta(&pm(tokens).expect("Tests should pass well-formed input"))
.expect("Tests should pass valid input")
}
#[test]
fn unit_succeeds() {
let () = fm::<()>(quote!(ignore));
}
#[test]
fn bool_succeeds() {
// word format
assert_eq!(fm::<bool>(quote!(ignore)), true);
// bool literal
assert_eq!(fm::<bool>(quote!(ignore = true)), true);
assert_eq!(fm::<bool>(quote!(ignore = false)), false);
// string literals
assert_eq!(fm::<bool>(quote!(ignore = "true")), true);
assert_eq!(fm::<bool>(quote!(ignore = "false")), false);
}
#[test]
fn char_succeeds() {
// char literal
assert_eq!(fm::<char>(quote!(ignore = '😬')), '😬');
// string literal
assert_eq!(fm::<char>(quote!(ignore = "😬")), '😬');
}
#[test]
fn string_succeeds() {
// cooked form
assert_eq!(&fm::<String>(quote!(ignore = "world")), "world");
// raw form
assert_eq!(&fm::<String>(quote!(ignore = r#"world"#)), "world");
}
#[test]
#[allow(clippy::float_cmp)] // we want exact equality
fn number_succeeds() {
assert_eq!(fm::<u8>(quote!(ignore = "2")), 2u8);
assert_eq!(fm::<i16>(quote!(ignore = "-25")), -25i16);
assert_eq!(fm::<f64>(quote!(ignore = "1.4e10")), 1.4e10);
}
#[test]
fn int_without_quotes() {
assert_eq!(fm::<u8>(quote!(ignore = 2)), 2u8);
assert_eq!(fm::<u16>(quote!(ignore = 255)), 255u16);
assert_eq!(fm::<u32>(quote!(ignore = 5000)), 5000u32);
// Check that we aren't tripped up by incorrect suffixes
assert_eq!(fm::<u32>(quote!(ignore = 5000i32)), 5000u32);
}
#[test]
#[allow(clippy::float_cmp)] // we want exact equality
fn float_without_quotes() {
assert_eq!(fm::<f32>(quote!(ignore = 2.)), 2.0f32);
assert_eq!(fm::<f32>(quote!(ignore = 2.0)), 2.0f32);
assert_eq!(fm::<f64>(quote!(ignore = 1.4e10)), 1.4e10f64);
}
#[test]
fn meta_succeeds() {
use syn::Meta;
assert_eq!(
fm::<Meta>(quote!(hello(world, today))),
pm(quote!(hello(world, today))).unwrap()
);
}
#[test]
fn hash_map_succeeds() {
use | k that a `HashMap` cannot have duplicate keys, and that the generated error
/// is assigned a span to correctly target the diagnostic message.
#[test]
fn hash_map_duplicate() {
use std::collections::HashMap;
let err: Result<HashMap<String, bool>> =
FromMeta::from_meta(&pm(quote!(ignore(hello, hello = false))).unwrap());
let err = err.expect_err("Duplicate keys in HashMap should error");
assert!(err.has_span());
assert_eq!(err.to_string(), Error::duplicate_field("hello").to_string());
}
#[test]
fn hash_map_multiple_errors() {
use std::collections::HashMap;
let err = HashMap::<String, bool>::from_meta(
&pm(quote!(ignore(hello, hello = 3, hello = false))).unwrap(),
)
.expect_err("Duplicates and bad values should error");
assert_eq!(err.len(), 3);
let errors = err.into_iter().collect::<Vec<_>>();
assert!(errors[0].has_span());
assert!(errors[1].has_span());
assert!(errors[2].has_span());
}
#[test]
fn hash_map_ident_succeeds() {
use std::collections::HashMap;
use syn::parse_quote;
let comparison = {
let mut c = HashMap::<syn::Ident, bool>::new();
c.insert(parse_quote!(first), true);
c.insert(parse_quote!(second), false);
c
};
assert_eq!(
fm::<HashMap<syn::Ident, bool>>(quote!(ignore(first, second = false))),
comparison
);
}
#[test]
fn hash_map_ident_rejects_non_idents() {
use std::collections::HashMap;
let err: Result<HashMap<syn::Ident, bool>> =
FromMeta::from_meta(&pm(quote!(ignore(first, the::second))).unwrap());
err.unwrap_err();
}
#[test]
fn hash_map_path_succeeds() {
use std::collections::HashMap;
use syn::parse_quote;
let comparison = {
let mut c = HashMap::<syn::Path, bool>::new();
c.insert(parse_quote!(first), true);
c.insert(parse_quote!(the::second), false);
c
};
assert_eq!(
fm::<HashMap<syn::Path, bool>>(quote!(ignore(first, the::second = false))),
comparison
);
}
/// Tests that fallible parsing will always produce an outer `Ok` (from `fm`),
/// and will accurately preserve the inner contents.
#[test]
fn darling_result_succeeds() {
fm::<Result<()>>(quote!(ignore)).unwrap();
fm::<Result<()>>(quote!(ignore(world))).unwrap_err();
}
/// Test punctuated
#[test]
fn test_punctuated() {
fm::<syn::punctuated::Punctuated<syn::FnArg, syn::token::Comma>>(quote!(
ignore = "a: u8, b: Type"
));
fm::<syn::punctuated::Punctuated<syn::Expr, syn::token::Comma>>(quote!(ignore = "a, b, c"));
}
#[test]
fn test_expr_array() {
fm::<syn::ExprArray>(quote!(ignore = "[0x1, 0x2]"));
fm::<syn::ExprArray>(quote!(ignore = "[\"Hello World\", \"Test Array\"]"));
}
#[test]
fn test_number_array() {
assert_eq!(
fm::<Vec<u8>>(quote!(ignore = "[16, 0xff]")),
vec![0x10, 0xff]
);
assert_eq!(
fm::<Vec<u16>>(quote!(ignore = "[32, 0xffff]")),
vec![0x20, 0xffff]
);
assert_eq!(
fm::<Vec<u32>>(quote!(ignore = "[48, 0xffffffff]")),
vec![0x30, 0xffffffff]
);
assert_eq!(
fm::<Vec<u64>>(quote!(ignore = "[64, 0xffffffffffffffff]")),
vec![0x40, 0xffffffffffffffff]
);
assert_eq!(
fm::<Vec<usize>>(quote!(ignore = "[80, 0xffffffff]")),
vec![0x50, 0xffffffff]
);
}
}
| std::collections::HashMap;
let comparison = {
let mut c = HashMap::new();
c.insert("hello".to_string(), true);
c.insert("world".to_string(), false);
c.insert("there".to_string(), true);
c
};
assert_eq!(
fm::<HashMap<String, bool>>(quote!(ignore(hello, world = false, there = "true"))),
comparison
);
}
/// Chec |
destination_test.go | // Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package table
import (
//"fmt"
"fmt"
"net"
"testing"
"time"
"github.com/osrg/gobgp/packet/bgp"
"github.com/stretchr/testify/assert"
)
func TestDestinationNewIPv4(t *testing.T) {
peerD := DestCreatePeer()
pathD := DestCreatePath(peerD)
ipv4d := NewDestination(pathD[0].GetNlri(), 0)
assert.NotNil(t, ipv4d)
}
func TestDestinationNewIPv6(t *testing.T) {
peerD := DestCreatePeer()
pathD := DestCreatePath(peerD)
ipv6d := NewDestination(pathD[0].GetNlri(), 0)
assert.NotNil(t, ipv6d)
}
func TestDestinationSetRouteFamily(t *testing.T) {
dd := &Destination{}
dd.setRouteFamily(bgp.RF_IPv4_UC)
rf := dd.Family()
assert.Equal(t, rf, bgp.RF_IPv4_UC)
}
func TestDestinationGetRouteFamily(t *testing.T) {
dd := &Destination{}
dd.setRouteFamily(bgp.RF_IPv6_UC)
rf := dd.Family()
assert.Equal(t, rf, bgp.RF_IPv6_UC)
}
func TestDestinationSetNlri(t *testing.T) {
dd := &Destination{}
nlri := bgp.NewIPAddrPrefix(24, "13.2.3.1")
dd.setNlri(nlri)
r_nlri := dd.GetNlri()
assert.Equal(t, r_nlri, nlri)
}
func TestDestinationGetNlri(t *testing.T) {
dd := &Destination{}
nlri := bgp.NewIPAddrPrefix(24, "10.110.123.1")
dd.setNlri(nlri)
r_nlri := dd.GetNlri()
assert.Equal(t, r_nlri, nlri)
}
func TestCalculate2(t *testing.T) {
origin := bgp.NewPathAttributeOrigin(0)
aspathParam := []bgp.AsPathParamInterface{bgp.NewAs4PathParam(2, []uint32{65001})}
aspath := bgp.NewPathAttributeAsPath(aspathParam)
nexthop := bgp.NewPathAttributeNextHop("10.0.0.1")
med := bgp.NewPathAttributeMultiExitDisc(0)
pathAttributes := []bgp.PathAttributeInterface{origin, aspath, nexthop, med}
nlri := bgp.NewIPAddrPrefix(24, "10.10.0.0")
// peer1 sends normal update message 10.10.0.0/24
update1 := bgp.NewBGPUpdateMessage(nil, pathAttributes, []*bgp.IPAddrPrefix{nlri})
peer1 := &PeerInfo{AS: 1, Address: net.IP{1, 1, 1, 1}}
path1 := ProcessMessage(update1, peer1, time.Now())[0]
d := NewDestination(nlri, 0)
d.Calculate(path1)
	// suppose peer2 sends grammatically correct but semantically flawed update message
// which has a withdrawal nlri not advertised before
update2 := bgp.NewBGPUpdateMessage([]*bgp.IPAddrPrefix{nlri}, pathAttributes, nil)
peer2 := &PeerInfo{AS: 2, Address: net.IP{2, 2, 2, 2}}
path2 := ProcessMessage(update2, peer2, time.Now())[0]
assert.Equal(t, path2.IsWithdraw, true)
d.Calculate(path2)
// we have a path from peer1 here
assert.Equal(t, len(d.knownPathList), 1)
// after that, new update with the same nlri comes from peer2
update3 := bgp.NewBGPUpdateMessage(nil, pathAttributes, []*bgp.IPAddrPrefix{nlri})
path3 := ProcessMessage(update3, peer2, time.Now())[0]
assert.Equal(t, path3.IsWithdraw, false)
d.Calculate(path3)
// this time, we have paths from peer1 and peer2
assert.Equal(t, len(d.knownPathList), 2)
// now peer3 sends normal update message 10.10.0.0/24
peer3 := &PeerInfo{AS: 3, Address: net.IP{3, 3, 3, 3}}
update4 := bgp.NewBGPUpdateMessage(nil, pathAttributes, []*bgp.IPAddrPrefix{nlri})
path4 := ProcessMessage(update4, peer3, time.Now())[0]
d.Calculate(path4)
// we must have paths from peer1, peer2 and peer3
assert.Equal(t, len(d.knownPathList), 3)
}
func TestMedTieBreaker(t *testing.T) {
nlri := bgp.NewIPAddrPrefix(24, "10.10.0.0")
p0 := func() *Path {
aspath := bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65001, 65002}), bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65003, 65004})})
attrs := []bgp.PathAttributeInterface{aspath, bgp.NewPathAttributeMultiExitDisc(0)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
p1 := func() *Path {
aspath := bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65001, 65002}), bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65003, 65005})})
attrs := []bgp.PathAttributeInterface{aspath, bgp.NewPathAttributeMultiExitDisc(10)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
// same AS
assert.Equal(t, compareByMED(p0, p1), p0)
p2 := func() *Path {
aspath := bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65003})})
attrs := []bgp.PathAttributeInterface{aspath, bgp.NewPathAttributeMultiExitDisc(10)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
// different AS
assert.Equal(t, compareByMED(p0, p2), (*Path)(nil))
p3 := func() *Path {
aspath := bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SEQ, []uint32{65003, 65004}), bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65001, 65003})})
attrs := []bgp.PathAttributeInterface{aspath, bgp.NewPathAttributeMultiExitDisc(0)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
p4 := func() *Path {
aspath := bgp.NewPathAttributeAsPath([]bgp.AsPathParamInterface{bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_SEQ, []uint32{65001, 65002}), bgp.NewAs4PathParam(bgp.BGP_ASPATH_ATTR_TYPE_CONFED_SEQ, []uint32{65005, 65006})})
attrs := []bgp.PathAttributeInterface{aspath, bgp.NewPathAttributeMultiExitDisc(10)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
// ignore confed
assert.Equal(t, compareByMED(p3, p4), p3)
p5 := func() *Path {
attrs := []bgp.PathAttributeInterface{bgp.NewPathAttributeMultiExitDisc(0)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
p6 := func() *Path {
attrs := []bgp.PathAttributeInterface{bgp.NewPathAttributeMultiExitDisc(10)}
return NewPath(nil, nlri, false, attrs, time.Now(), false)
}()
// no aspath
assert.Equal(t, compareByMED(p5, p6), p5)
}
func TestTimeTieBreaker(t *testing.T) {
origin := bgp.NewPathAttributeOrigin(0)
aspathParam := []bgp.AsPathParamInterface{bgp.NewAs4PathParam(2, []uint32{65001})}
aspath := bgp.NewPathAttributeAsPath(aspathParam)
nexthop := bgp.NewPathAttributeNextHop("10.0.0.1")
med := bgp.NewPathAttributeMultiExitDisc(0)
pathAttributes := []bgp.PathAttributeInterface{origin, aspath, nexthop, med}
nlri := bgp.NewIPAddrPrefix(24, "10.10.0.0")
updateMsg := bgp.NewBGPUpdateMessage(nil, pathAttributes, []*bgp.IPAddrPrefix{nlri})
peer1 := &PeerInfo{AS: 2, LocalAS: 1, Address: net.IP{1, 1, 1, 1}, ID: net.IP{1, 1, 1, 1}}
path1 := ProcessMessage(updateMsg, peer1, time.Now())[0]
peer2 := &PeerInfo{AS: 2, LocalAS: 1, Address: net.IP{2, 2, 2, 2}, ID: net.IP{2, 2, 2, 2}} // weaker router-id
path2 := ProcessMessage(updateMsg, peer2, time.Now().Add(-1*time.Hour))[0] // older than path1
d := NewDestination(nlri, 0)
d.Calculate(path1)
d.Calculate(path2)
assert.Equal(t, len(d.knownPathList), 2)
assert.Equal(t, true, d.GetBestPath("", 0).GetSource().ID.Equal(net.IP{2, 2, 2, 2})) // path from peer2 win
// this option disables tie breaking by age
SelectionOptions.ExternalCompareRouterId = true
d = NewDestination(nlri, 0)
d.Calculate(path1)
d.Calculate(path2)
assert.Equal(t, len(d.knownPathList), 2)
assert.Equal(t, true, d.GetBestPath("", 0).GetSource().ID.Equal(net.IP{1, 1, 1, 1})) // path from peer1 win
}
func DestCreatePeer() []*PeerInfo {
peerD1 := &PeerInfo{AS: 65000}
peerD2 := &PeerInfo{AS: 65001}
peerD3 := &PeerInfo{AS: 65002}
peerD := []*PeerInfo{peerD1, peerD2, peerD3}
return peerD
}
func DestCreatePath(peerD []*PeerInfo) []*Path {
bgpMsgD1 := updateMsgD1()
bgpMsgD2 := updateMsgD2()
bgpMsgD3 := updateMsgD3()
pathD := make([]*Path, 3)
for i, msg := range []*bgp.BGPMessage{bgpMsgD1, bgpMsgD2, bgpMsgD3} {
updateMsgD := msg.Body.(*bgp.BGPUpdate)
nlriList := updateMsgD.NLRI
pathAttributes := updateMsgD.PathAttributes
nlri_info := nlriList[0]
pathD[i] = NewPath(peerD[i], nlri_info, false, pathAttributes, time.Now(), false)
}
return pathD
}
func updateMsgD1() *bgp.BGPMessage {
origin := bgp.NewPathAttributeOrigin(0)
aspathParam := []bgp.AsPathParamInterface{bgp.NewAsPathParam(2, []uint16{65000})}
aspath := bgp.NewPathAttributeAsPath(aspathParam)
nexthop := bgp.NewPathAttributeNextHop("192.168.50.1")
med := bgp.NewPathAttributeMultiExitDisc(0)
pathAttributes := []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
nlri := []*bgp.IPAddrPrefix{bgp.NewIPAddrPrefix(24, "10.10.10.0")}
updateMsg := bgp.NewBGPUpdateMessage(nil, pathAttributes, nlri)
UpdatePathAttrs4ByteAs(updateMsg.Body.(*bgp.BGPUpdate))
return updateMsg
}
func updateMsgD2() *bgp.BGPMessage {
origin := bgp.NewPathAttributeOrigin(0)
aspathParam := []bgp.AsPathParamInterface{bgp.NewAsPathParam(2, []uint16{65100})}
aspath := bgp.NewPathAttributeAsPath(aspathParam)
nexthop := bgp.NewPathAttributeNextHop("192.168.100.1")
med := bgp.NewPathAttributeMultiExitDisc(100)
pathAttributes := []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
nlri := []*bgp.IPAddrPrefix{bgp.NewIPAddrPrefix(24, "20.20.20.0")}
updateMsg := bgp.NewBGPUpdateMessage(nil, pathAttributes, nlri)
UpdatePathAttrs4ByteAs(updateMsg.Body.(*bgp.BGPUpdate))
return updateMsg
}
func updateMsgD3() *bgp.BGPMessage {
origin := bgp.NewPathAttributeOrigin(0)
aspathParam := []bgp.AsPathParamInterface{bgp.NewAsPathParam(2, []uint16{65100})}
aspath := bgp.NewPathAttributeAsPath(aspathParam)
nexthop := bgp.NewPathAttributeNextHop("192.168.150.1")
med := bgp.NewPathAttributeMultiExitDisc(100)
pathAttributes := []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
nlri := []*bgp.IPAddrPrefix{bgp.NewIPAddrPrefix(24, "30.30.30.0")}
w1 := bgp.NewIPAddrPrefix(23, "40.40.40.0")
withdrawnRoutes := []*bgp.IPAddrPrefix{w1}
updateMsg := bgp.NewBGPUpdateMessage(withdrawnRoutes, pathAttributes, nlri)
UpdatePathAttrs4ByteAs(updateMsg.Body.(*bgp.BGPUpdate))
return updateMsg
}
func TestRadixkey(t *testing.T) {
assert.Equal(t, "000010100000001100100000", CidrToRadixkey("10.3.32.0/24"))
assert.Equal(t, "000010100000001100100000", IpToRadixkey(net.ParseIP("10.3.32.0").To4(), 24))
assert.Equal(t, "000010100000001100100000", IpToRadixkey(net.ParseIP("10.3.32.0").To4(), 24))
assert.Equal(t, CidrToRadixkey("::ffff:0.0.0.0/96")+"000010100000001100100000", CidrToRadixkey("::ffff:10.3.32.0/120"))
}
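// Worked example for the radix key format above (each octet expands to 8 bits
// and the key is truncated to the prefix length): for 10.3.32.0/24,
// 10 -> 00001010, 3 -> 00000011, 32 -> 00100000, which concatenates to the
// 24-bit key "000010100000001100100000" asserted in TestRadixkey.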
func TestIpToRadixkey(t *testing.T) {
for i := byte(0); i < 255; i += 3 {
for y := byte(1); y < 128; y *= 2 {
ip := net.IPv4(i, i+2, i+3, i-y)
for n := uint8(16); n <= 32; n += 2 {
exp := CidrToRadixkey(fmt.Sprintf("%v/%d", ip.To4(), n))
got := IpToRadixkey(ip.To4(), n)
if exp != got {
t.Fatalf(`exp %v; got %v`, exp, got)
}
}
for n := uint8(116); n <= 128; n += 2 {
exp := CidrToRadixkey(fmt.Sprintf("::ffff:%v/%d", ip.To16(), n))
got := IpToRadixkey(ip.To16(), n)
if exp != got {
t.Fatalf(`exp %v; got %v`, exp, got)
}
}
}
}
}
func TestMultipath(t *testing.T) {
UseMultiplePaths.Enabled = true
origin := bgp.NewPathAttributeOrigin(0)
aspathParam := []bgp.AsPathParamInterface{bgp.NewAs4PathParam(2, []uint32{65000})}
aspath := bgp.NewPathAttributeAsPath(aspathParam)
nexthop := bgp.NewPathAttributeNextHop("192.168.150.1")
med := bgp.NewPathAttributeMultiExitDisc(100)
pathAttributes := []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
nlri := []*bgp.IPAddrPrefix{bgp.NewIPAddrPrefix(24, "10.10.10.0")}
updateMsg := bgp.NewBGPUpdateMessage(nil, pathAttributes, nlri)
peer1 := &PeerInfo{AS: 1, Address: net.IP{1, 1, 1, 1}, ID: net.IP{1, 1, 1, 1}}
path1 := ProcessMessage(updateMsg, peer1, time.Now())[0]
peer2 := &PeerInfo{AS: 2, Address: net.IP{2, 2, 2, 2}, ID: net.IP{2, 2, 2, 2}}
med = bgp.NewPathAttributeMultiExitDisc(100)
nexthop = bgp.NewPathAttributeNextHop("192.168.150.2")
pathAttributes = []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
updateMsg = bgp.NewBGPUpdateMessage(nil, pathAttributes, nlri)
path2 := ProcessMessage(updateMsg, peer2, time.Now())[0]
d := NewDestination(nlri[0], 0)
d.Calculate(path2)
best, old, multi := d.Calculate(path1).GetChanges(GLOBAL_RIB_NAME, 0, false)
assert.NotNil(t, best)
assert.Equal(t, old, path2)
assert.Equal(t, len(multi), 2)
assert.Equal(t, len(d.GetKnownPathList(GLOBAL_RIB_NAME, 0)), 2)
path3 := path2.Clone(true)
dd := d.Calculate(path3)
best, old, multi = dd.GetChanges(GLOBAL_RIB_NAME, 0, false)
assert.Nil(t, best)
assert.Equal(t, old, path1)
assert.Equal(t, len(multi), 1)
assert.Equal(t, len(d.GetKnownPathList(GLOBAL_RIB_NAME, 0)), 1)
peer3 := &PeerInfo{AS: 3, Address: net.IP{3, 3, 3, 3}, ID: net.IP{3, 3, 3, 3}}
med = bgp.NewPathAttributeMultiExitDisc(50)
nexthop = bgp.NewPathAttributeNextHop("192.168.150.3")
pathAttributes = []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
updateMsg = bgp.NewBGPUpdateMessage(nil, pathAttributes, nlri)
path4 := ProcessMessage(updateMsg, peer3, time.Now())[0]
dd = d.Calculate(path4)
best, _, multi = dd.GetChanges(GLOBAL_RIB_NAME, 0, false)
assert.NotNil(t, best)
assert.Equal(t, len(multi), 2)
assert.Equal(t, len(d.GetKnownPathList(GLOBAL_RIB_NAME, 0)), 2)
nexthop = bgp.NewPathAttributeNextHop("192.168.150.2")
pathAttributes = []bgp.PathAttributeInterface{
origin,
aspath,
nexthop,
med,
}
updateMsg = bgp.NewBGPUpdateMessage(nil, pathAttributes, nlri)
path5 := ProcessMessage(updateMsg, peer2, time.Now())[0]
best, _, multi = d.Calculate(path5).GetChanges(GLOBAL_RIB_NAME, 0, false)
assert.NotNil(t, best)
assert.Equal(t, len(multi), 3)
assert.Equal(t, len(d.GetKnownPathList(GLOBAL_RIB_NAME, 0)), 3)
UseMultiplePaths.Enabled = false
}
func TestIdMap(t *testing.T) {
d := NewDestination(bgp.NewIPAddrPrefix(24, "10.10.0.101"), 64)
for i := 0; ; i++ {
if id, err := d.localIdMap.FindandSetZeroBit(); err == nil {
assert.Equal(t, uint(i+1), id)
} else {
assert.Equal(t, i, 63)
break
}
}
d.localIdMap.Expand()
for i := 0; i < 64; i++ {
id, _ := d.localIdMap.FindandSetZeroBit()
assert.Equal(t, id, uint(64+i))
}
_, err := d.localIdMap.FindandSetZeroBit()
assert.NotNil(t, err)
}
func TestGetWithdrawnPath(t *testing.T) | {
attrs := []bgp.PathAttributeInterface{
bgp.NewPathAttributeOrigin(0),
}
p1 := NewPath(nil, bgp.NewIPAddrPrefix(24, "13.2.3.0"), false, attrs, time.Now(), false)
p2 := NewPath(nil, bgp.NewIPAddrPrefix(24, "13.2.4.0"), false, attrs, time.Now(), false)
p3 := NewPath(nil, bgp.NewIPAddrPrefix(24, "13.2.5.0"), false, attrs, time.Now(), false)
u := &Update{
KnownPathList: []*Path{p2},
OldKnownPathList: []*Path{p1, p2, p3},
}
l := u.GetWithdrawnPath()
assert.Equal(t, len(l), 2)
assert.Equal(t, l[0].GetNlri(), p1.GetNlri())
} |
|
base.go | package mch
import (
"errors"
"fmt"
"strconv"
"time"
"github.com/arden/wechat/utils"
)
// WXMch 微信商户
type WXMch struct {
AppID string
MchID string
ApiKey string
Client *utils.HTTPClient
SSLClient *utils.HTTPClient
}
// Order returns new order
func (wx *WXMch) Order(options ...utils.HTTPRequestOption) *Order {
return &Order{
mch: wx,
options: options,
}
}
// Refund returns new refund
func (wx *WXMch) Refund(options ...utils.HTTPRequestOption) *Refund {
return &Refund{
mch: wx,
options: options,
}
}
// Pappay returns new pappay
func (wx *WXMch) Pappay(options ...utils.HTTPRequestOption) *Pappay {
return &Pappay{
mch: wx,
options: options,
}
}
// Transfer returns new transfer
func (wx *WXMch) Transfer(options ...utils.HTTPRequestOption) *Transfer {
return &Transfer{
mch: wx,
options: options,
}
}
// Redpack returns new redpack
func (wx *WXMch) Redpack(options ...utils.HTTPRequestOption) *Redpack {
return &Redpack{
mch: wx,
options: options,
}
}
// APPAPI 用于APP拉起支付
func (wx *WXMch) APPAPI(prepayID string) utils.WXML {
ch := utils.WXML{
"appid": wx.AppID,
"partnerid": wx.MchID,
"prepayid": prepayID,
"package": "Sign=WXPay",
"noncestr": utils.NonceStr(),
"timestamp": strconv.FormatInt(time.Now().Unix(), 10),
}
ch["sign"] = SignWithMD5(ch, wx.ApiKey)
return ch
}
// JSAPI 用于JS拉起支付
func (wx *WXMch) JSAPI(prepayID string) utils.WXML {
ch := utils.WXML{
"appId": wx.AppID,
"nonceStr": utils.NonceStr(),
"package": fmt.Sprintf("prepay_id=%s", prepayID),
"signType": SignMD5,
"timeStamp": strconv.FormatInt(time.Now().Unix(), 10),
}
ch["paySign"] = SignWithMD5(ch, wx.ApiKey)
return ch
}
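// A usage sketch (credentials and prepay id below are placeholders): the map
// returned by JSAPI is typically serialized to JSON and handed to the page,
// which passes it to WeixinJSBridge / wx.chooseWXPay to invoke payment.
//
//	wx := &WXMch{AppID: "wx1234567890", MchID: "10000100", ApiKey: "secret"}
//	params := wx.JSAPI("wx201410272009395522657a690389285100")
//	// json.Marshal(params) -> {"appId":"...","paySign":"...", ...}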
// VerifyWXReply 验证微信结果
func (wx *WXMch) VerifyWXReply(reply utils.WXML) error {
if wxsign, ok := reply["sign"]; ok {
signType := SignMD5
if v, ok := reply["sign_type"]; ok {
signType = v
}
signature := ""
switch signType {
case SignMD5:
signature = SignWithMD5(reply, wx.ApiKey)
case SignHMacSHA256:
signature = SignWithHMacSHA256(reply, wx.ApiKey)
default:
return fmt.Errorf("invalid sign type: %s", signType)
}
if wxsign != signature {
return fmt.Errorf("signature verified failed, want: %s, got: %s", signature, wxsign)
}
}
if appid, ok := reply["appid"]; ok {
if appid != wx.AppID {
return fmt.Errorf("appid mismatch, want: %s, got: %s", wx.AppID, reply["appid"])
}
}
if mchid, ok := reply["mch_id"]; ok {
if mchid != wx.MchID {
return fmt.Errorf("mchid mismatch, want: %s, got: %s", wx.MchID, reply["mch_id"])
}
}
return nil
}
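// Sketch of the intended call pattern (assumed, not from this package's docs):
// after decoding a pay notification into a utils.WXML map, verify it before
// trusting any field in it.
//
//	if err := wx.VerifyWXReply(notifyData); err != nil {
//		// reject the callback: bad signature or mismatched appid/mch_id
//	}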
// RSAPublicKey 获取RSA加密公钥
func (wx *WXMch) RSAPublicKey(options ...utils.HTTPRequestOption) ([]byte, error) {
body := utils.WXML{
"mch_id": wx.MchID,
"nonce_str": utils.NonceStr(),
"sign_type": SignMD5,
}
body["sign"] = SignWithMD5(body, wx.ApiKey)
resp, err := wx.SSLClient.PostXML(TransferBalanceOrderQueryURL, body, options...)
if err != nil {
return nil, err
}
if resp["return_code"] != ResultSuccess {
return nil, errors.New(resp["return_msg"])
}
if err := wx.VerifyWXReply(resp); err != nil {
return nil, err
}
pubKey, ok := resp["pub_key"]
if !ok {
return nil, errors.New("empty pub_key")
}
return | []byte(pubKey), nil
}
|
|
boarding_pass.rs | pub fn | (line: &str) -> u32 {
let row = calc_row(&line);
let seat = calc_seat(&line);
row * 8 + seat
}
fn calc_row(line: &str) -> u32 {
let mut min = 0;
let mut max = 127;
for i in 0..7 {
let character = line.chars().nth(i).unwrap();
if character == 'B' {
min = (min + max) / 2 + 1;
} else if character == 'F' {
max = (min + max) / 2;
} else {
panic!("Unexpected char");
}
}
return min;
}
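// Worked example (the well-known Advent of Code sample): for "FBFBBFFRLR" the
// row bits FBFBBFF narrow 0..=127 down to 44 via the halving above, the seat
// bits RLR narrow 0..=7 down to 5, and the id is 44 * 8 + 5 = 357.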
fn calc_seat(line: &str) -> u32 {
let mut min = 0;
let mut max = 7;
for i in 7..10 {
let character = line.chars().nth(i).unwrap();
if character == 'R' {
min = (min + max) / 2 + 1;
} else if character == 'L' {
max = (min + max) / 2;
} else {
panic!("Uexpected char");
}
}
return min;
} | calc_id |
pde.py | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
- pdsolve() - Solves PDE's
- classify_pde() - Classifies PDEs into possible hints for dsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from itertools import combinations_with_replacement
from sympy.simplify import simplify # type: ignore
from sympy.core import Add, S
from sympy.core.compatibility import reduce, is_sequence
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x,y)`` is a function of two variables whose derivatives in that
variable make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
pdsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
- See test_pde.py for many tests, which serves also as a set of
examples for how to use pdsolve().
- pdsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). Note that it is not possible
to get an explicit solution for f(x, y) as in the case of ODE's
- Do help(pde.pde_hintname) to get help more information on a
specific hint
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func, hint=hint, simplify=True,
type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'], hints['func'],
hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimizes the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
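# Illustrative contrast (hint names as defined in allhints above): solving with
# the "_Integral" variant keeps the unevaluated Integral, while the plain hint
# lets _handle_Integral carry it out via doit(), e.g.
#   pdsolve(eq, hint='1st_linear_constant_coeff_Integral')  # contains Integral(...)
#   pdsolve(eq, hint='1st_linear_constant_coeff')           # Integral evaluated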
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
# Try solving for the function
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
# try direct substitution of the solution into the PDE and simplify
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form:
.. math::
f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}}
and can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
      d               d
    a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
      dx              dy
>>> pprint(pdsolve(genform))
                                 -c*(a*x + b*y)
                                 ---------------
                                       2    2
                                      a  + b
    f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
                          x   y
                        - - - -
                          2   2
    f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y}
+ c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is:
.. math::
f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2}
\int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2},
\frac{- a \eta + b \xi}{a^2 + b^2} \right)
e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right]
e^{- \frac{c \xi}{a^2 + b^2}}
\right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, ,
where `F(\eta)` is an arbitrary single-valued function. The solution
can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u - G(x,y)
>>> pprint(genform)
      d               d
    a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)
      dx              dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
              //          a*x + b*y                                             \
              ||              /                                                 |
              ||             |                                                  |
              ||             |                                      c*xi        |
              ||             |                                     -------      |
              ||             |                                      2    2      |
              ||             |      /a*xi + b*eta  -a*eta + b*xi\  a  + b       |
              ||             |     G|------------, -------------|*e        d(xi)|
              ||             |      |    2    2        2    2   |               |
              ||             |      \   a  + b        a  + b    /               |
              ||             |                                                  |
              ||            /                                                   |
              ||                                                                |
    f(x, y) = ||F(eta) + -------------------------------------------------------|*
              ||                                  2    2                        |
              \\                                 a  + b                         /
    <BLANKLINE>
             \|
             ||
             ||
             ||
             ||
             ||
             ||
             ||
             ||
      -c*xi  ||
     ------- ||
      2    2 ||
     a  + b  ||
    e        ||
             ||
             /|eta=-a*y + b*x, xi=a*x + b*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial
differential equation is
.. math:: a(x, y) \frac{\partial f(x, y)}{\partial x}
+ b(x, y) \frac{\partial f(x, y)}{\partial y}
+ c(x, y) f(x, y) = G(x, y)
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
functions in `x` and `y`. This PDE is converted into an ODE by
making the following transformation:
1. `\xi` as `x`
2. `\eta` as the constant in the solution to the differential
equation `\frac{dy}{dx} = -\frac{b}{a}`
Making the previous substitutions reduces it to the linear ODE
.. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using ``dsolve``.
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
                                         d                     d
    -G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
                                         dx                    dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - {x, y}
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - {x, y}
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
for key, sym in enumerate(syms):
final = sol.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
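# Sketch of the substitution performed above (symbol names are illustrative):
# dsolve leaves an arbitrary constant such as C1 in the ODE solution, and this
# helper swaps it for an arbitrary function of the characteristic variable,
# e.g. C1*exp(y**2/2) becomes F(x*y)*exp(y**2/2) once eta is replaced by x*y.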
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
else:
return pde_separate(Eq(eq, 0), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
Consider an equation of two independent variables x, y and a dependent
variable w, we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x) + y(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
Consider an equation of two independent variables x, y and a dependent
variable w, we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
# Extract derivatives depending our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
# Check, whether we have already term with independent variable...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs] | |
OsuDBReader.ts
import { Beatmap, OsuDB, StarRatings, TimingPoint } from "./DatabaseTypes";
import { Reader } from "./DatabaseReader";
export class OsuDBReader extends Reader {
readStarRatings(): StarRatings {
const count = this.readInt();
const list: StarRatings = [];
for (let i = 0; i < count; i++) {
this.readByte(); // === 0x08
const mods = this.readInt();
this.readByte(); // === 0x0d
const stars = this.readDouble();
list.push([mods, stars]);
}
return list;
}
readTimingPoints(): TimingPoint[] {
const count = this.readInt();
const list: TimingPoint[] = [];
for (let i = 0; i < count; i++) {
const bpm = this.readDouble();
const offset = this.readDouble();
const inherited = this.readBoolean();
list.push({ bpm, offset, inherited });
}
return list;
}
readBeatmap(version: number): Beatmap {
    // Older formats (up to version 20191107) prefix each entry with its size in bytes.
    const bytesOfBeatmapEntry = version <= 20191107 ? this.readInt() : 0;
const artist: string = this.readString();
const artistUnicode: string = this.readString();
const title: string = this.readString();
const titleUnicode: string = this.readString();
const creator: string = this.readString();
    const difficulty: string = this.readString();
    const audioFileName: string = this.readString();
    const md5Hash: string = this.readString();
const fileName: string = this.readString();
const rankedStatus: number = this.readByte();
const circlesCount: number = this.readShort();
const slidersCount: number = this.readShort();
const spinnersCount: number = this.readShort();
const lastModifiedTime: bigint = this.readLong();
    // Difficulty values are a single byte up to version 20140609 and a 4-byte float afterwards.
    const difficultyReader = () => (version <= 20140609 ? this.readByte() : this.readSingle());
const approachRate: number = difficultyReader();
const circleSize: number = difficultyReader();
const hpDrain: number = difficultyReader();
const overallDifficulty: number = difficultyReader();
const sliderVelocity: number = this.readDouble();
const starRatings = () => (version >= 20140609 ? this.readStarRatings() : []);
const stdStarRatings: StarRatings = starRatings();
const taikoStarRatings: StarRatings = starRatings();
const catchStarRatings: StarRatings = starRatings();
const maniaStarRatings: StarRatings = starRatings();
const drainTime: number = this.readInt();
const totalTime: number = this.readInt();
const audioPreviewTime: number = this.readInt();
const timingPoints: TimingPoint[] = this.readTimingPoints();
const beatmapId: number = this.readInt();
const beatmapSetId: number = this.readInt();
const threadId: number = this.readInt();
const stdGrade: number = this.readByte();
const taikoGrade: number = this.readByte();
const ctbGrade: number = this.readByte();
const maniaGrade: number = this.readByte();
const localOffset: number = this.readShort();
const stackLeniency: number = this.readSingle();
const gameplayMode: number = this.readByte();
const source = this.readString();
const tags = this.readString();
const offset: number = this.readShort();
const titleFont = this.readString();
const isUnplayed: boolean = this.readBoolean();
const lastPlayed: bigint = this.readDateTime(); // readDateTime() or readLong()? on wiki it says Long
const isOsz2: boolean = this.readBoolean();
const folderName = this.readString();
const lastCheckedAgainstOsuRepo = this.readDateTime();
const ignoreBeatmapSound: boolean = this.readBoolean();
const ignoreBeatmapSkin: boolean = this.readBoolean();
const disableStoryboard: boolean = this.readBoolean();
const disableVideo: boolean = this.readBoolean();
const visualOverride: boolean = this.readBoolean();
if (version <= 20140609) this.readShort();
const lastModificationTime = this.readInt(); // ? There is already a last modified time above
const maniaScrollSpeed = this.readByte();
return {
bytesOfBeatmapEntry,
artist,
artistUnicode,
title,
titleUnicode,
creator,
difficulty,
audioFileName,
md5Hash,
fileName,
rankedStatus,
circlesCount,
slidersCount,
spinnersCount,
lastModifiedTime,
approachRate,
circleSize,
hpDrain,
overallDifficulty,
sliderVelocity,
stdStarRatings,
taikoStarRatings,
catchStarRatings,
maniaStarRatings,
drainTime,
totalTime,
audioPreviewTime,
timingPoints,
beatmapId,
beatmapSetId,
threadId,
stdGrade,
taikoGrade,
ctbGrade,
maniaGrade,
localOffset,
stackLeniency,
gameplayMode,
source,
tags,
offset,
titleFont,
isUnplayed,
lastPlayed,
isOsz2,
folderName,
lastCheckedAgainstOsuRepo,
ignoreBeatmapSound,
ignoreBeatmapSkin,
disableStoryboard,
disableVideo,
visualOverride,
maniaScrollSpeed,
};
}
readBeatmaps = (count: number, version: number) => {
const beatmaps: Beatmap[] = [];
// count = 1;
for (let i = 0; i < count; i++) {
beatmaps.push(this.readBeatmap(version) as Beatmap);
}
return beatmaps;
};
readOsuDB = async (): Promise<OsuDB> => {
const osuVersion = this.readInt();
const folderCount = this.readInt();
const accountIsUnlocked = this.readBoolean();
const accountUnlockDate = this.readDateTime();
const playerName = this.readString();
const numberOfBeatmaps = this.readInt();
const beatmaps = this.readBeatmaps(numberOfBeatmaps, osuVersion);
const userPermissions = this.readInt();
return {
osuVersion,
folderCount,
accountIsUnlocked,
accountUnlockDate,
playerName,
numberOfBeatmaps,
beatmaps,
userPermissions,
};
};
}
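// Usage sketch. The Reader base class is not shown in this file, so the
// construction below is an assumption (it may take a Buffer, a byte array,
// or a file path in practice):
//
//   const reader = new OsuDBReader(/* raw bytes of osu!.db */);
//   const db = await reader.readOsuDB();
//   console.log(db.osuVersion, db.playerName, db.beatmaps.length);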
task.py
import pandas as pd
from autokeras import auto_model
from autokeras.hypermodel import head
from autokeras.hypermodel import node
class SupervisedImagePipeline(auto_model.AutoModel):
def __init__(self, outputs, **kwargs):
super().__init__(inputs=node.ImageInput(),
outputs=outputs,
**kwargs)
class ImageClassifier(SupervisedImagePipeline):
"""AutoKeras image classification class.
# Arguments
num_classes: Int. Defaults to None. If None, it will infer from the data.
multi_label: Boolean. Defaults to False.
loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
'categorical_crossentropy' based on the number of classes.
metrics: A list of Keras metrics. Defaults to use 'accuracy'.
name: String. The name of the AutoModel. Defaults to 'image_classifier'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to 100.
directory: String. The path to a directory for storing the search outputs.
Defaults to None, which would create a folder with the name of the
AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
seed: Int. Random seed.
"""
def __init__(self,
num_classes=None,
multi_label=False,
loss=None,
metrics=None,
name='image_classifier',
max_trials=100,
directory=None,
objective='val_loss',
seed=None):
super().__init__(
outputs=head.ClassificationHead(num_classes=num_classes,
multi_label=multi_label,
loss=loss,
metrics=metrics),
max_trials=max_trials,
directory=directory,
name=name,
objective=objective,
seed=seed)
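    # Minimal usage sketch for ImageClassifier (x_train/y_train/x_test are
    # hypothetical numpy arrays of images and labels, not defined here):
    #
    #     clf = ImageClassifier(max_trials=10)
    #     clf.fit(x_train, y_train, validation_split=0.2)
    #     predictions = clf.predict(x_test)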
class ImageRegressor(SupervisedImagePipeline):
"""AutoKeras image regression class.
# Arguments
output_dim: Int. The number of output dimensions. Defaults to None.
If None, it will infer from the data.
loss: A Keras loss function. Defaults to use 'mean_squared_error'.
metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
name: String. The name of the AutoModel. Defaults to 'image_regressor'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to 100.
directory: String. The path to a directory for storing the search outputs.
Defaults to None, which would create a folder with the name of the
AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
seed: Int. Random seed.
"""
def __init__(self,
output_dim=None,
loss=None,
metrics=None,
name='image_regressor',
max_trials=100,
directory=None,
objective='val_loss',
seed=None):
super().__init__(
outputs=head.RegressionHead(output_dim=output_dim,
loss=loss,
metrics=metrics),
max_trials=max_trials,
directory=directory,
name=name,
objective=objective,
seed=seed)
class SupervisedTextPipeline(auto_model.AutoModel):
def __init__(self, outputs, **kwargs):
super().__init__(inputs=node.TextInput(),
outputs=outputs,
**kwargs)
class TextClassifier(SupervisedTextPipeline):
"""AutoKeras text classification class.
# Arguments
num_classes: Int. Defaults to None. If None, it will infer from the data.
multi_label: Boolean. Defaults to False.
loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
'categorical_crossentropy' based on the number of classes.
metrics: A list of Keras metrics. Defaults to use 'accuracy'.
name: String. The name of the AutoModel. Defaults to 'text_classifier'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to 100.
directory: String. The path to a directory for storing the search outputs.
Defaults to None, which would create a folder with the name of the
AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
seed: Int. Random seed.
"""
def __init__(self,
num_classes=None,
multi_label=False,
loss=None,
metrics=None,
name='text_classifier',
max_trials=100,
directory=None,
objective='val_loss',
seed=None):
super().__init__(
outputs=head.ClassificationHead(num_classes=num_classes,
multi_label=multi_label,
loss=loss,
metrics=metrics),
max_trials=max_trials,
directory=directory,
name=name,
objective=objective,
seed=seed)
class TextRegressor(SupervisedTextPipeline):
"""AutoKeras text regression class.
# Arguments
output_dim: Int. The number of output dimensions. Defaults to None.
If None, it will infer from the data.
loss: A Keras loss function. Defaults to use 'mean_squared_error'.
metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
name: String. The name of the AutoModel. Defaults to 'text_regressor'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to 100.
directory: String. The path to a directory for storing the search outputs.
Defaults to None, which would create a folder with the name of the
AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
seed: Int. Random seed.
"""
def __init__(self,
output_dim=None,
loss=None,
metrics=None,
name='text_regressor',
max_trials=100,
directory=None,
objective='val_loss',
seed=None):
super().__init__(
outputs=head.RegressionHead(output_dim=output_dim,
loss=loss,
metrics=metrics),
max_trials=max_trials,
directory=directory,
name=name,
objective=objective,
seed=seed)
class SupervisedStructuredDataPipeline(auto_model.AutoModel):
def __init__(self, outputs, column_names, column_types, **kwargs):
inputs = node.StructuredDataInput()
inputs.column_types = column_types
inputs.column_names = column_names
if column_types:
for column_type in column_types.values():
if column_type not in ['categorical', 'numerical']:
raise ValueError(
'Column_types should be either "categorical" '
'or "numerical", but got {name}'.format(name=column_type))
if column_names and column_types:
for column_name in column_types:
if column_name not in column_names:
raise ValueError('Column_names and column_types are '
'mismatched. Cannot find column name '
'{name} in the data.'.format(name=column_name))
super().__init__(inputs=inputs,
outputs=outputs,
**kwargs)
self._target_col_name = None
def _read_from_csv(self, x, y):
df = pd.read_csv(x)
target = df.pop(y).to_numpy()
return df, target
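    # Example: _read_from_csv('train.csv', 'survived') returns the feature
    # DataFrame with the 'survived' column popped out, plus that column as a
    # numpy array of labels (file and column names are illustrative).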
def fit(self,
x=None,
y=None,
epochs=None,
callbacks=None,
validation_split=0,
validation_data=None,
**kwargs):
"""Search for the best model and hyperparameters for the task.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Training data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the training data.
y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.
If the data is from a csv file, it should be a string corresponding
to the label column.
epochs: Int. The number of epochs to train each model during the search.
If unspecified, we would use epochs equal to 1000 and early stopping
with patience equal to 30.
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset.
The best model found would be fit on the entire dataset including the
validation data.
validation_data: Data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
`validation_data` will override `validation_split`. The type of the
validation data should be the same as the training data.
The best model found would be fit on the training dataset without the
validation data.
**kwargs: Any arguments supported by keras.Model.fit.
"""
# x is file path of training data
if isinstance(x, str):
            self._target_col_name = y
x, y = self._read_from_csv(x, y)
if validation_data:
x_val, y_val = validation_data
if isinstance(x_val, str):
validation_data = self._read_from_csv(x_val, y_val)
super().fit(x=x,
y=y,
epochs=epochs,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
**kwargs)
def predict(self, x, batch_size=32, **kwargs):
"""Predict the output for a given testing data.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Testing data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the testing data.
batch_size: Int. Defaults to 32.
**kwargs: Any arguments supported by keras.Model.predict.
# Returns
A list of numpy.ndarray objects or a single numpy.ndarray.
The predicted results.
"""
if isinstance(x, str):
x = pd.read_csv(x)
if self._target_col_name in x:
                x.pop(self._target_col_name)
        return super().predict(x=x,
                               batch_size=batch_size,
                               **kwargs)
def evaluate(self, x, y=None, batch_size=32, **kwargs):
"""Evaluate the best model for the given data.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Testing data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the testing data.
y: String, numpy.ndarray, or tensorflow.Dataset. Testing data y.
If the data is from a csv file, it should be a string corresponding
to the label column.
batch_size: Int. Defaults to 32.
**kwargs: Any arguments supported by keras.Model.evaluate.
# Returns
Scalar test loss (if the model has a single output and no metrics) or
list of scalars (if the model has multiple outputs and/or metrics).
The attribute model.metrics_names will give you the display labels for
the scalar outputs.
"""
if isinstance(x, str):
x, y = self._read_from_csv(x, y)
return super().evaluate(x=x,
y=y,
batch_size=batch_size,
**kwargs)
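    # CSV-path usage sketch for the fit/evaluate/predict trio (file names and
    # the label column are illustrative):
    #
    #     clf = StructuredDataClassifier(max_trials=10)
    #     clf.fit(x='train.csv', y='survived')
    #     clf.evaluate(x='eval.csv', y='survived')
    #     predictions = clf.predict(x='test.csv')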
class StructuredDataClassifier(SupervisedStructuredDataPipeline):
"""AutoKeras structured data classification class.
# Arguments
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the data.
Defaults to None. If None, it will obtained from the header of the csv
file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should either
be 'numerical' or 'categorical', indicating the type of that column.
Defaults to None. If not None, the column_names need to be specified.
If None, it will be inferred from the data.
num_classes: Int. Defaults to None. If None, it will infer from the data.
multi_label: Boolean. Defaults to False.
loss: A Keras loss function. Defaults to use 'binary_crossentropy' or
'categorical_crossentropy' based on the number of classes.
metrics: A list of Keras metrics. Defaults to use 'accuracy'.
name: String. The name of the AutoModel. Defaults to
'structured_data_classifier'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to 100.
directory: String. The path to a directory for storing the search outputs.
Defaults to None, which would create a folder with the name of the
AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize. Defaults to 'val_accuracy'.
seed: Int. Random seed.
"""
def __init__(self,
column_names=None,
column_types=None,
num_classes=None,
multi_label=False,
loss=None,
metrics=None,
name='structured_data_classifier',
max_trials=100,
directory=None,
objective='val_accuracy',
seed=None):
super().__init__(
outputs=head.ClassificationHead(num_classes=num_classes,
multi_label=multi_label,
loss=loss,
metrics=metrics),
column_names=column_names,
column_types=column_types,
max_trials=max_trials,
directory=directory,
name=name,
objective=objective,
seed=seed)
def fit(self,
x=None,
y=None,
epochs=None,
callbacks=None,
validation_split=0,
validation_data=None,
**kwargs):
"""Search for the best model and hyperparameters for the task.
# Arguments
x: String, numpy.ndarray, pandas.DataFrame or tensorflow.Dataset.
Training data x. If the data is from a csv file, it should be a
string specifying the path of the csv file of the training data.
y: String, numpy.ndarray, or tensorflow.Dataset. Training data y.
If the data is from a csv file, it should be a string corresponding
to the label column.
epochs: Int. The number of epochs to train each model during the search.
If unspecified, we would use epochs equal to 1000 and early stopping
with patience equal to 30.
callbacks: List of Keras callbacks to apply during training and
validation.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset.
validation_data: Data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
`validation_data` will override `validation_split`. The type of the
validation data should be the same as the training data.
**kwargs: Any arguments supported by keras.Model.fit.
"""
super().fit(x=x,
y=y,
epochs=epochs,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
**kwargs)
class StructuredDataRegressor(SupervisedStructuredDataPipeline):
"""AutoKeras structured data regression class.
# Arguments
column_names: A list of strings specifying the names of the columns. The
length of the list should be equal to the number of columns of the data.
Defaults to None. If None, it will obtained from the header of the csv
file or the pandas.DataFrame.
column_types: Dict. The keys are the column names. The values should either
be 'numerical' or 'categorical', indicating the type of that column.
Defaults to None. If not None, the column_names need to be specified.
If None, it will be inferred from the data.
        output_dim: Int. The number of output dimensions. Defaults to None.
            If None, it will infer from the data.
        loss: A Keras loss function. Defaults to use 'mean_squared_error'.
        metrics: A list of Keras metrics. Defaults to use 'mean_squared_error'.
        name: String. The name of the AutoModel. Defaults to
            'structured_data_regressor'.
max_trials: Int. The maximum number of different Keras Models to try.
The search may finish before reaching the max_trials. Defaults to 100.
directory: String. The path to a directory for storing the search outputs.
Defaults to None, which would create a folder with the name of the
AutoModel in the current directory.
objective: String. Name of model metric to minimize
or maximize, e.g. 'val_accuracy'. Defaults to 'val_loss'.
seed: Int. Random seed.
"""
def __init__(self,
column_names=None,
column_types=None,
output_dim=None,
loss=None,
metrics=None,
name='structured_data_regressor',
max_trials=100,
directory=None,
objective='val_loss',
seed=None):
super().__init__(
outputs=head.RegressionHead(output_dim=output_dim,
loss=loss,
metrics=metrics),
column_names=column_names,
column_types=column_types,
max_trials=max_trials,
directory=directory,
name=name,
objective=objective,
            seed=seed)
Sbs.tsx
import React, { /*useCallback, useEffect, */useMemo, useState } from 'react';
import Page from '../../components/Page';
import PitImage from '../../assets/img/pit.png';
import { createGlobalStyle } from 'styled-components';
import { Route, Switch, useRouteMatch } from 'react-router-dom';
import { useWallet } from 'use-wallet';
import UnlockWallet from '../../components/UnlockWallet';
import PageHeader from '../../components/PageHeader';
import { Box,/* Paper, Typography,*/ Button, Grid } from '@material-ui/core';
import styled from 'styled-components';
import Spacer from '../../components/Spacer';
import usePolarlysFinance from '../../hooks/usePolarlysFinance';
import { getDisplayBalance/*, getBalance*/ } from '../../utils/formatBalance';
import { BigNumber/*, ethers*/ } from 'ethers';
import useSwapStardustToBorealis from '../../hooks/BorealisSwapper/useSwapStarDustToBorealis';
import useApprove, { ApprovalState } from '../../hooks/useApprove';
import useBorealisSwapperStats from '../../hooks/BorealisSwapper/useBorealisSwapperStats';
import TokenInput from '../../components/TokenInput';
import Card from '../../components/Card';
import CardContent from '../../components/CardContent';
import TokenSymbol from '../../components/TokenSymbol';
const BackgroundImage = createGlobalStyle`
body {
background: url(${PitImage}) no-repeat !important;
background-size: cover !important;
}
`;
function isNumeric(n: any) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
const Sbs: React.FC = () => {
const { path } = useRouteMatch();
const { account } = useWallet();
const polarlysFinance = usePolarlysFinance();
const [stardustAmount, setStardustAmount] = useState('');
const [borealisAmount, setBorealisAmount] = useState('');
const [approveStatus, approve] = useApprove(polarlysFinance.STARDUST, polarlysFinance.contracts.BorealisSwapper.address);
const { onSwapBorealis } = useSwapStardustToBorealis();
const borealisSwapperStat = useBorealisSwapperStats(account);
const borealisBalance = useMemo(() => (borealisSwapperStat ? Number(borealisSwapperStat.borealisBalance) : 0), [borealisSwapperStat]);
const bondBalance = useMemo(() => (borealisSwapperStat ? Number(borealisSwapperStat.stardustBalance) : 0), [borealisSwapperStat]);
const handleStarDustChange = async (e: any) => {
if (e.currentTarget.value === '') {
setStardustAmount('');
setBorealisAmount('');
return
}
if (!isNumeric(e.currentTarget.value)) return;
setStardustAmount(e.currentTarget.value);
const updateBorealisAmount = await polarlysFinance.estimateAmountOfBorealis(e.currentTarget.value);
setBorealisAmount(updateBorealisAmount);
};
const handleStarDustSelectMax = async () => {
setStardustAmount(String(bondBalance));
const updateBorealisAmount = await polarlysFinance.estimateAmountOfBorealis(String(bondBalance));
setBorealisAmount(updateBorealisAmount);
};
const handleBorealisSelectMax = async () => {
setBorealisAmount(String(borealisBalance));
const rateBorealisPerNebula = (await polarlysFinance.getBorealisSwapperStat(account)).rateBorealisPerNebula;
const updateStarDustAmount = ((BigNumber.from(10).pow(30)).div(BigNumber.from(rateBorealisPerNebula))).mul(Number(borealisBalance) * 1e6);
setStardustAmount(getDisplayBalance(updateStarDustAmount, 18, 6));
};
const handleBorealisChange = async (e: any) => {
const inputData = e.currentTarget.value;
if (inputData === '') {
setBorealisAmount('');
setStardustAmount('');
return
}
if (!isNumeric(inputData)) return;
setBorealisAmount(inputData);
const rateBorealisPerNebula = (await polarlysFinance.getBorealisSwapperStat(account)).rateBorealisPerNebula;
const updateStarDustAmount = ((BigNumber.from(10).pow(30)).div(BigNumber.from(rateBorealisPerNebula))).mul(Number(inputData) * 1e6);
setStardustAmount(getDisplayBalance(updateStarDustAmount, 18, 6));
}
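  // Note on the arithmetic above: this assumes rateBorealisPerNebula is an
  // 18-decimal fixed-point rate. Scaling the typed amount by 1e6 keeps the
  // multiplication in integer BigNumber space, and the 10^30 numerator
  // cancels that 1e6 plus the rate's 18 decimals, leaving an 18-decimal
  // STARDUST amount for getDisplayBalance(..., 18, 6).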
return (
<Switch>
<Page>
<BackgroundImage />
{!!account ? (
<>
<Route exact path={path}>
<PageHeader icon={'🏦'} title="StarDust -> Borealis Swap" subtitle="Swap StarDust to Borealis" />
</Route>
<Box mt={5}>
<Grid container justify="center" spacing={6}>
<StyledBoardroom>
<StyledCardsWrapper>
<StyledCardWrapper>
<Card>
<CardContent>
<StyledCardContentInner>
<StyledCardTitle>StarDusts</StyledCardTitle>
<StyledExchanger>
<StyledToken>
<StyledCardIcon>
<TokenSymbol symbol={polarlysFinance.STARDUST.symbol} size={54} />
</StyledCardIcon>
</StyledToken>
</StyledExchanger>
<Grid item xs={12}>
<TokenInput
onSelectMax={handleStarDustSelectMax}
onChange={handleStarDustChange}
value={stardustAmount}
max={bondBalance}
symbol="StarDust"
></TokenInput>
</Grid>
<StyledDesc>{`${bondBalance} STARDUST Available in Wallet`}</StyledDesc>
</StyledCardContentInner>
</CardContent>
</Card>
</StyledCardWrapper>
<Spacer size="lg" />
<StyledCardWrapper>
<Card>
<CardContent>
<StyledCardContentInner>
<StyledCardTitle>Borealis</StyledCardTitle>
<StyledExchanger>
<StyledToken>
<StyledCardIcon>
<TokenSymbol symbol={polarlysFinance.BOREALIS.symbol} size={54} />
</StyledCardIcon>
</StyledToken>
</StyledExchanger>
<Grid item xs={12}>
<TokenInput
onSelectMax={handleBorealisSelectMax}
onChange={handleBorealisChange}
value={borealisAmount}
max={borealisBalance}
symbol="Borealis"
></TokenInput>
</Grid>
<StyledDesc>{`${borealisBalance} BOREALIS Available in Swapper`}</StyledDesc>
</StyledCardContentInner>
</CardContent>
</Card>
</StyledCardWrapper>
</StyledCardsWrapper>
</StyledBoardroom>
</Grid>
</Box>
<Box mt={5}>
<Grid container justify="center">
<Grid item xs={8}>
<Card>
<CardContent>
<StyledApproveWrapper>
{approveStatus !== ApprovalState.APPROVED ? (
<Button
disabled={approveStatus !== ApprovalState.NOT_APPROVED}
color="primary"
variant="contained"
onClick={approve}
size="medium"
>
Approve STARDUST
</Button>
) : (
<Button
color="primary"
variant="contained"
onClick={() => onSwapBorealis(stardustAmount.toString())}
size="medium"
>
Swap
</Button>
)}
</StyledApproveWrapper>
</CardContent>
</Card>
</Grid>
</Grid>
</Box>
</>
) : (
<UnlockWallet />
)}
</Page>
</Switch>
);
};
const StyledBoardroom = styled.div`
align-items: center;
display: flex;
flex-direction: column;
@media (max-width: 768px) {
width: 100%;
}
`;
const StyledCardsWrapper = styled.div`
display: flex;
@media (max-width: 768px) {
width: 100%;
flex-flow: column nowrap;
align-items: center;
}
`;
const StyledCardWrapper = styled.div`
display: flex;
flex: 1;
flex-direction: column;
@media (max-width: 768px) {
width: 100%;
}
`;
const StyledApproveWrapper = styled.div`
margin-left: auto;
margin-right: auto;
`;
const StyledCardTitle = styled.div`
align-items: center;
display: flex;
font-size: 20px;
font-weight: 700;
height: 64px;
justify-content: center;
margin-top: ${(props) => -props.theme.spacing[3]}px;
`;
const StyledCardIcon = styled.div`
background-color: ${(props) => props.theme.color.grey[900]};
width: 72px;
height: 72px;
border-radius: 36px;
display: flex;
align-items: center;
justify-content: center;
margin-bottom: ${(props) => props.theme.spacing[2]}px;
`;
const StyledExchanger = styled.div`
align-items: center;
display: flex;
margin-bottom: ${(props) => props.theme.spacing[5]}px;
`;
const StyledToken = styled.div`
align-items: center;
display: flex;
flex-direction: column;
font-weight: 600;
`;
const StyledCardContentInner = styled.div`
align-items: center;
display: flex;
flex: 1;
flex-direction: column;
justify-content: space-between;
`;
const StyledDesc = styled.span``;
export default Sbs;
zone6.py
# System imports
from datetime import datetime
import time
import json
import logging
# Package imports
from flask import Blueprint
from flask import render_template
from flask import jsonify
from flask import request
# Local imports
import common
api = Blueprint('zone6', __name__, url_prefix='/zone6')
rack_prefix = 'RACK'
rack_suffixes = ['A1', 'A2', 'A3', 'A4',
'B1', 'B2', 'B3', 'B4',
'C1', 'C2', 'C3', 'C4',
'D1', 'D2', 'D3', 'D4',
'E1', 'E2', 'E3', 'E4',
'F1', 'F2', 'F3', 'F4',
'G1', 'G2', 'G3', 'G4',
'H1', 'H2', 'H3', 'H4',
'J1', 'J2', 'J3', 'J4',
'K1', 'K2', 'K3', 'K4',
'L1', 'L2', 'L3', 'L4',
'M1', 'M2', 'M3', 'M4',
'N1', 'N2', 'N3', 'N4',
'P1', 'P2', 'P3', 'P4',
'Q1', 'Q2', 'Q3', 'Q4',
'R1', 'R2', 'R3', 'R4',
'X1', 'X2', 'X3', 'X4',
'X5', 'X6', 'X7', 'X8',
'X9', 'X10', 'X11', 'X12',
]
rack_locations = ['-'.join([rack_prefix, suffix])
for suffix in rack_suffixes]
beamlines = ['i03',
'i04',
'i04-1',
'i24',
]
beamline_prefix = 'BEAMLINE'
beamline_locations = ['{}-{}'.format(beamline_prefix, x.upper()) for x in beamlines]
beamline_locations.extend(['USER-COLLECTION',
'STORES-OUT',
'ZONE-6-STORE',
])
"""
App to demonstrate use of vuejs
"""
@api.route("/vdewars")
def vdewars():
return render_template('vue-dewars.html', title="Zone6 Dewars", api_prefix="zone6", rack_locations=rack_locations)
@api.route('/')
def index():
"""
Main page for dewar management
"""
return render_template('dewars.html',
title="zone6 Dewar Management",
rack_locations=rack_locations,
rack_suffixes=rack_suffixes,
rack_prefix=rack_prefix,
beamlines=beamline_locations,
api_prefix="zone6",
)
@api.route('/dewars', methods=["GET", "POST", "DELETE"])
def location():
"""
API route for dewar management
"""
result = {}
status_code = 200
if request.method == "GET":
# Get any dewar with any rack location
# There should only be one per location
# Simple call so use controller directly
result, status_code = common.find_dewars_by_location(rack_locations)
elif request.method == "POST":
location = request.form['location']
barcode = request.form['barcode']
result, status_code = common.update_dewar_location(barcode, location)
elif request.method == "DELETE":
try:
location = request.form['location']
except KeyError:
            # No form data (e.g. an axios DELETE); fall back to query parameters
location = request.args.get('location')
result, status_code = common.remove_dewar_from_location(location)
else:
result = {'location': '',
'barcode': '',
'status': 'fail',
'reason': 'Method/route not implemented yet'}
status_code = 501
return jsonify(result), status_code
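# Example requests against this route (rack and barcode values are
# illustrative; paths assume the blueprint's /zone6 prefix):
#
#   curl http://localhost:5000/zone6/dewars
#   curl -X POST -d 'location=RACK-A1' -d 'barcode=DLS-0001' \
#        http://localhost:5000/zone6/dewars
#   curl -X DELETE 'http://localhost:5000/zone6/dewars?location=RACK-A1'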
@api.route('/dewars/find', methods=["GET"])
def find():
"""
Return a list of matching dewars with this facility code
    Should be requested with the facility code as a URL parameter, e.g. ?fc=DLS-MS-1234.
We specifically return the status code so the front end can show feedback
"""
facilitycode = request.args.get('fc')
result, status_code = common.find_dewar(facilitycode)
    return jsonify(result), status_code
cluster_test.go
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package internal
import (
"context"
"fmt"
"testing"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func podReady(isReady corev1.ConditionStatus) corev1.PodCondition {
return corev1.PodCondition{
Type: corev1.PodReady,
Status: isReady,
}
}
type checkStaticPodReadyConditionTest struct {
name string
conditions []corev1.PodCondition
}
func TestCheckStaticPodReadyCondition(t *testing.T) {
table := []checkStaticPodReadyConditionTest{
{
name: "pod is ready",
conditions: []corev1.PodCondition{podReady(corev1.ConditionTrue)},
},
}
for _, test := range table {
t.Run(test.name, func(t *testing.T) {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
},
Spec: corev1.PodSpec{},
Status: corev1.PodStatus{Conditions: test.conditions},
}
if err := checkStaticPodReadyCondition(pod); err != nil {
t.Fatalf("should not have gotten an error: %v", err)
}
})
}
}
func TestCheckStaticPodNotReadyCondition(t *testing.T) {
table := []checkStaticPodReadyConditionTest{
{
name: "no pod status",
},
{
name: "not ready pod status",
conditions: []corev1.PodCondition{podReady(corev1.ConditionFalse)},
},
}
for _, test := range table {
t.Run(test.name, func(t *testing.T) {
pod := &corev1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-pod",
},
Spec: corev1.PodSpec{},
Status: corev1.PodStatus{Conditions: test.conditions},
}
if err := checkStaticPodReadyCondition(pod); err == nil {
t.Fatal("should have returned an error")
}
})
}
}
func TestControlPlaneIsHealthy(t *testing.T) {
readyStatus := corev1.PodStatus{
Conditions: []corev1.PodCondition{
{
Type: corev1.PodReady,
Status: corev1.ConditionTrue,
},
},
}
workloadCluster := &cluster{
client: &fakeClient{
list: nodeListForTestControlPlaneIsHealthy(),
get: map[string]interface{}{
"kube-system/kube-apiserver-first-control-plane": &corev1.Pod{Status: readyStatus},
"kube-system/kube-apiserver-second-control-plane": &corev1.Pod{Status: readyStatus},
"kube-system/kube-apiserver-third-control-plane": &corev1.Pod{Status: readyStatus},
"kube-system/kube-controller-manager-first-control-plane": &corev1.Pod{Status: readyStatus},
"kube-system/kube-controller-manager-second-control-plane": &corev1.Pod{Status: readyStatus},
"kube-system/kube-controller-manager-third-control-plane": &corev1.Pod{Status: readyStatus},
},
},
}
health, err := workloadCluster.controlPlaneIsHealthy(context.Background())
if err != nil {
t.Fatal(err)
}
if len(health) == 0 {
t.Fatal("no nodes were checked")
}
if len(health) != len(nodeListForTestControlPlaneIsHealthy().Items) {
t.Fatal("not all nodes were checked")
}
}
func nodeListForTestControlPlaneIsHealthy() *corev1.NodeList {
nodeNamed := func(name string) corev1.Node {
return corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
}
return &corev1.NodeList{
Items: []corev1.Node{
nodeNamed("first-control-plane"),
nodeNamed("second-control-plane"),
nodeNamed("third-control-plane"),
},
}
}
func TestGetMachinesForCluster(t *testing.T) {
m := ManagementCluster{Client: &fakeClient{
list: machineListForTestGetMachinesForCluster(),
}}
clusterKey := types.NamespacedName{
Namespace: "my-namespace",
Name: "my-cluster",
}
machines, err := m.GetMachinesForCluster(context.Background(), clusterKey)
if err != nil {
t.Fatal(err)
}
if len(machines) != 3 {
t.Fatalf("expected 3 machines but found %d", len(machines))
}
// Test the OwnedControlPlaneMachines works
machines, err = m.GetMachinesForCluster(context.Background(), clusterKey, OwnedControlPlaneMachines("my-control-plane"))
if err != nil {
t.Fatal(err)
}
if len(machines) != 1 {
t.Fatalf("expected 1 control plane machine but got %d", len(machines))
}
// Test that the filters use AND logic instead of OR logic
nameFilter := func(cluster *clusterv1.Machine) bool {
return cluster.Name == "first-machine"
}
machines, err = m.GetMachinesForCluster(context.Background(), clusterKey, OwnedControlPlaneMachines("my-control-plane"), nameFilter)
if err != nil {
t.Fatal(err)
}
if len(machines) != 1 {
t.Fatalf("expected 1 control plane machine but got %d", len(machines))
}
}
func machineListForTestGetMachinesForCluster() *clusterv1.MachineList {
owned := true
ownedRef := []metav1.OwnerReference{
{
Kind: "KubeadmControlPlane",
Name: "my-control-plane",
Controller: &owned,
},
}
machine := func(name string) clusterv1.Machine {
return clusterv1.Machine{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "my-namespace",
Labels: map[string]string{
clusterv1.ClusterLabelName: "my-cluster",
},
},
}
}
controlPlaneMachine := machine("first-machine")
controlPlaneMachine.ObjectMeta.Labels[clusterv1.MachineControlPlaneLabelName] = ""
controlPlaneMachine.OwnerReferences = ownedRef
return &clusterv1.MachineList{
Items: []clusterv1.Machine{
controlPlaneMachine,
machine("second-machine"),
machine("third-machine"),
},
}
}
type fakeClient struct {
client.Client
list interface{}
get map[string]interface{}
}
func (f *fakeClient) Get(_ context.Context, key client.ObjectKey, obj runtime.Object) error {
item := f.get[key.String()]
switch l := item.(type) {
case *corev1.Pod:
l.DeepCopyInto(obj.(*corev1.Pod))
default:
return fmt.Errorf("unknown type: %s", l)
}
return nil
}
func (f *fakeClient) List(_ context.Context, list runtime.Object, _ ...client.ListOption) error {
switch l := f.list.(type) {
case *clusterv1.MachineList:
		l.DeepCopyInto(list.(*clusterv1.MachineList))
	case *corev1.NodeList:
		l.DeepCopyInto(list.(*corev1.NodeList))
default:
return fmt.Errorf("unknown type: %s", l)
}
return nil
}
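// Run these tests with the standard toolchain (the package path is
// illustrative):
//
//	go test -run 'TestCheckStaticPod|TestControlPlaneIsHealthy|TestGetMachinesForCluster' ./...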
GameManager.ts
import * as R from "ramda";
import Game from "buzzwords-shared/Game";
import { HexCoord } from "buzzwords-shared/types";
import {
getCellsToBeReset,
willBecomeOwned,
} from "buzzwords-shared/gridHelpers";
import { isValidWord } from "buzzwords-shared/alphaHelpers";
import { nanoid } from "nanoid";
import HexGrid, {
makeHexGrid,
getCell,
getCellNeighbors,
setCell,
getNewCellValues,
} from "buzzwords-shared/hexgrid";
import { WordsObject, wordsBySortedLetters } from "./words";
import Cell from "buzzwords-shared/cell";
export default class GameManager {
game: Game | null;
constructor(game: Game | null) {
this.game = game;
}
pass(userId: string): Game {
if (!this.game) {
throw new Error("Game Manager has no game!");
}
if (!this.game.users.includes(userId)) {
throw new Error("Not your game");
}
if (this.game.gameOver) {
throw new Error("Game is over");
}
const turnUser = this.game.users[this.game.turn];
if (userId != turnUser) {
throw new Error("Not your turn");
}
if (this.game.users.length != 2) {
throw new Error("Need another player");
}
let opponentHasCapital = false;
const opponentCells = [];
for (const cell of Object.values(this.game.grid)) {
if (cell.owner == Number(!this.game.turn)) {
opponentCells.push(cell);
if (cell.capital) {
opponentHasCapital = true;
}
}
}
if (!opponentHasCapital) {
const newCapital =
opponentCells[Math.floor(Math.random() * opponentCells.length)];
newCapital.capital = true;
setCell(this.game.grid, newCapital);
}
this.game.moves.push({
coords: [],
grid: this.game.grid,
letters: [],
player: this.game.turn,
pass: true,
});
const nextTurn = Number(!this.game.turn) as 0 | 1;
this.game.turn = nextTurn;
return this.game;
}
forfeit(userId: string): Game {
if (!this.game) {
throw new Error("Game Manager has no game!");
}
if (!this.game.users.includes(userId)) {
throw new Error("Not your game");
}
if (this.game.gameOver) {
throw new Error("Game is over");
}
if (this.game.users.length != 2) {
throw new Error("Need another player");
}
const idx = this.game.users.indexOf(userId) as 0 | 1;
this.game.winner = Number(!idx) as 0 | 1;
this.game.gameOver = true;
this.game.moves.push({
coords: [],
grid: this.game.grid,
letters: [],
player: idx,
forfeit: true,
});
return this.game;
}
makeMove(userId: string, move: HexCoord[]): Game {
if (!this.game) {
throw new Error("Game Manager has no game!");
}
if (!this.game.users.includes(userId)) {
throw new Error("Not your game");
}
if (this.game.gameOver) {
throw new Error("Game is over");
}
const turnUser = this.game.users[this.game.turn];
if (userId != turnUser) {
throw new Error("Not your turn");
}
if (this.game.users.length != 2) {
throw new Error("Need another player");
}
let word = "";
for (const coord of move) {
try {
const cell = getCell(this.game.grid, coord.q, coord.r);
if (cell && cell.owner == 2 && cell.value) {
word += cell.value;
} else {
throw new Error("Cell in use or inactive");
}
} catch (e) {
throw new Error("Invalid coords");
}
}
console.log("move received word", word);
if (!isValidWord(word, WordsObject)) {
console.log("word invalid", word);
throw new Error("Not a valid word");
}
const gridCopy: { [coord: string]: Cell } = {};
Object.keys(this.game.grid).forEach((key) => {
const cell = this.game && this.game.grid[key];
if (cell) {
gridCopy[key] = R.omit(["_id"], cell);
}
});
const gameMove = {
grid: gridCopy,
coords: move,
letters: move.map(
(m) => getCell(this.game?.grid as HexGrid, m.q, m.r)?.value ?? ""
),
player: this.game.turn,
date: new Date(),
shuffle: false,
};
const turn = this.game.turn;
let capitalCaptured = false;
// Make all tiles adjacent to move neutral and active
const resetTiles = getCellsToBeReset(this.game.grid, move, this.game.turn);
// Parsed word, checked validity of move and word etc.
// Have to check for what's attached to current territory to see what to expand
// Have to check from above to see what is adjacent to enemy territory to see what to remove
// change whose turn it is
const toBecomeOwned = willBecomeOwned(this.game.grid, move, this.game.turn);
const toBeReset = R.difference(resetTiles, toBecomeOwned);
for (const cell of toBecomeOwned) {
cell.owner = this.game.turn;
if (cell.owner == this.game.turn) {
cell.value = "";
}
setCell(this.game.grid, cell);
}
const keys = R.difference(
R.difference(
Object.keys(this.game.grid),
toBeReset.map((cell) => `${cell.q},${cell.r}`)
),
toBecomeOwned.map((cell) => `${cell.q},${cell.r}`)
);
const grid = this.game.grid;
const letters = keys.map((k) => grid[k].value).filter(Boolean);
const opponentKeys = Object.entries(this.game.grid)
.filter(([k, c]) => c.owner == Number(!turn))
.map(([k, c]) => k);
const gameOver =
R.difference(
opponentKeys,
[...toBeReset, ...toBecomeOwned].map((c) => `${c.q},${c.r}`)
).length === 0;
if (!gameOver) {
try {
const newCellValues = getNewCellValues(
letters,
toBeReset.length,
WordsObject
);
for (let i = 0; i < toBeReset.length; i++) {
const tile = toBeReset[i];
tile.owner = 2;
if (tile.capital) {
capitalCaptured = true;
tile.capital = false;
}
tile.value = newCellValues[i];
setCell(this.game.grid, tile);
}
} catch (e) {
// No possible combinations. Need to regenerate the whole board!!
console.log("No valid letter combinations. Shuffling board...");
const newLetterCount = letters.length + toBeReset.length;
const newCellValues = getNewCellValues([], newLetterCount, WordsObject);
for (const tile of keys
.map((k) => grid[k])
.filter((k) => Boolean(k.value))) {
tile.owner = 2;
tile.value = newCellValues[0];
newCellValues.splice(0, 1);
setCell(this.game.grid, tile);
}
for (const tile of toBeReset) {
tile.owner = 2;
if (tile.capital) {
capitalCaptured = true;
tile.capital = false;
}
tile.value = newCellValues[0];
newCellValues.splice(0, 1);
setCell(this.game.grid, tile);
}
gameMove.shuffle = true;
}
this.game.moves.push(gameMove);
} else {
for (const c of toBeReset) {
c.value = "";
c.owner = 2;
setCell(this.game.grid, c);
}
this.game.moves.push(gameMove);
this.game.gameOver = true;
this.game.winner = this.game.turn;
return this.game;
}
for (const cell of Object.values(this.game.grid)) {
if (cell.owner == 2) {
const neighbors = getCellNeighbors(this.game.grid, cell.q, cell.r);
const playerNeighbors = neighbors.filter((c) => c.owner != 2);
if (!playerNeighbors.length) {
cell.value = "";
setCell(this.game.grid, cell);
}
}
}
const cells = this.game.grid;
const opponentCells = [];
let opponentHasCapital = false;
for (const cell of Object.values(cells)) {
if (cell.owner == Number(!this.game.turn)) {
opponentCells.push(cell);
if (cell.capital) {
opponentHasCapital = true;
break;
}
}
}
// If opponent has no capital at the end of your turn
// but you didn't capture their capital this turn
// randomly assign one of their cells to be capital
if (!capitalCaptured && !opponentHasCapital) {
const newCapital =
opponentCells[Math.floor(Math.random() * opponentCells.length)];
newCapital.capital = true;
setCell(this.game.grid, newCapital);
}
const nextTurn = capitalCaptured
? this.game.turn
: (Number(!this.game.turn) as 0 | 1);
this.game.turn = nextTurn;
return this.game;
}
createGame(userId: string): Game {
const game: Game = {
id: nanoid(),
turn: 0 as 0 | 1,
users: [userId],
grid: makeHexGrid(),
gameOver: false,
winner: null,
moves: [],
vsAI: false,
difficulty: 1,
deleted: false,
};
const neighbors = [
...getCellNeighbors(game.grid, -2, -1),
...getCellNeighbors(game.grid, 2, 1),
];
const newValues = getNewCellValues([], 12, WordsObject);
let i = 0;
for (const cell of neighbors) {
cell.value = newValues[i];
i++;
game.grid = setCell(game.grid, cell);
}
game.grid["-2,-1"].capital = true;
game.grid["-2,-1"].owner = 0;
game.grid["2,1"].capital = true;
game.grid["2,1"].owner = 1;
this.game = game;
return game;
}
}
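// Minimal usage sketch (user ids and coordinates are illustrative; a real
// move must spell a valid word from the neutral tiles):
//
//   const gm = new GameManager(null);
//   const game = gm.createGame("player-1");
//   game.users.push("player-2");
//   const updated = gm.makeMove("player-1", [{ q: -1, r: -1 }]);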
usart.rs
#![no_std]
#![no_main]
#![feature(trait_alias)]
#![feature(min_type_alias_impl_trait)]
#![feature(impl_trait_in_bindings)]
#![feature(type_alias_impl_trait)]
#![allow(incomplete_features)]
#[path = "../example_common.rs"]
mod example_common;
use cortex_m::prelude::_embedded_hal_blocking_serial_Write;
use embassy::executor::Executor;
use embassy::time::Clock;
use embassy::util::Forever;
use embassy_stm32::usart::{Config, Uart};
use example_common::*;
use cortex_m_rt::entry;
use stm32f4::stm32f429 as pac;
#[embassy::task]
async fn main_task() {
let p = embassy_stm32::init(Default::default());
let config = Config::default();
let mut usart = Uart::new(p.USART3, p.PD9, p.PD8, config, 16_000_000);
usart.bwrite_all(b"Hello Embassy World!\r\n").unwrap();
info!("wrote Hello, starting echo");
let mut buf = [0u8; 1];
loop {
usart.read(&mut buf).unwrap();
usart.bwrite_all(&buf).unwrap();
}
}
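// The loop above is a blocking one-byte echo: `read` waits for a byte on
// USART3 and `bwrite_all` writes it straight back, so a terminal attached
// to PD8/PD9 sees its own keystrokes echoed.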
struct ZeroClock;
impl Clock for ZeroClock {
fn now(&self) -> u64 {
0
}
}
static EXECUTOR: Forever<Executor> = Forever::new();
#[entry]
fn main() -> ! {
info!("Hello World!");
let pp = pac::Peripherals::take().unwrap();
pp.DBGMCU.cr.modify(|_, w| {
w.dbg_sleep().set_bit();
w.dbg_standby().set_bit();
w.dbg_stop().set_bit()
});
pp.RCC.ahb1enr.modify(|_, w| w.dma1en().enabled());
pp.RCC.ahb1enr.modify(|_, w| {
w.gpioaen().enabled();
w.gpioben().enabled();
w.gpiocen().enabled();
w.gpioden().enabled();
w.gpioeen().enabled();
w.gpiofen().enabled();
w
});
pp.RCC.apb2enr.modify(|_, w| {
w.syscfgen().enabled();
w
});
pp.RCC.apb1enr.modify(|_, w| {
w.usart3en().enabled();
w
});
unsafe { embassy::time::set_clock(&ZeroClock) };
let executor = EXECUTOR.put(Executor::new());
executor.run(|spawner| {
unwrap!(spawner.spawn(main_task()));
})
}
import.go
// +build ignore
// Imports HomeKit metadata from a file and creates files for every characteristic and service.
// It finishes by running `go fmt` in the characterist and service packages.
//
// The metadata file is created by running the following command on OS X
//
// plutil -convert json -r -o $GOPATH/src/github.com/brutella/hc/gen/metadata.json /Applications/HomeKit\ Accessory\ Simulator.app/Contents/Frameworks/HAPAccessoryKit.framework/Versions/A/Resources/default.metadata.plist
package main
import (
"encoding/json"
"github.com/brutella/hc/gen"
"github.com/brutella/hc/gen/golang"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
)
var LibPath = os.ExpandEnv("$GOPATH/src/github.com/brutella/hc")
var GenPath = filepath.Join(LibPath, "gen")
var SvcPkgPath = filepath.Join(LibPath, "service")
var AccPkgPath = filepath.Join(LibPath, "accessory")
var CharPkgPath = filepath.Join(LibPath, "characteristic")
var MetadataPath = filepath.Join(GenPath, "metadata.json")
func main() {
log.Println("Import data from", MetadataPath)
// Open metadata file
f, err := os.Open(MetadataPath)
if err != nil {
log.Fatal(err)
}
// Read content
b, err := ioutil.ReadAll(f)
if err != nil {
log.Fatal(err)
}
// Import json
metadata := gen.Metadata{}
err = json.Unmarshal(b, &metadata)
if err != nil {
log.Fatal(err)
}
// Create characteristic files
for _, char := range metadata.Characteristics {
log.Printf("Processing %s Characteristic", char.Name)
if b, err := golang.CharacteristicGoCode(char); err != nil {
log.Println(err)
} else {
filePath := filepath.Join(CharPkgPath, golang.CharacteristicFileName(char))
log.Println("Creating file", filePath)
if f, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666); err != nil {
log.Fatal(err)
} else {
if _, err := f.Write(b); err != nil {
				log.Fatal(err)
}
}
}
}
// Create service files
for _, svc := range metadata.Services {
log.Printf("Processing %s Service", svc.Name)
if b, err := golang.ServiceGoCode(svc, metadata.Characteristics); err != nil {
log.Println(err)
} else {
filePath := filepath.Join(SvcPkgPath, golang.ServiceFileName(svc))
log.Println("Creating file", filePath)
if f, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666); err != nil {
log.Fatal(err)
} else {
if _, err := f.Write(b); err != nil {
				log.Fatal(err)
}
}
}
}
// Create an accessory categories file
if b, err := golang.CategoriesGoCode(metadata.Categories); err != nil {
log.Println(err)
} else {
filePath := filepath.Join(AccPkgPath, "constant.go")
log.Println("Creating file", filePath)
if f, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666); err != nil {
log.Fatal(err)
} else {
if _, err := f.Write(b); err != nil {
			log.Fatal(err)
}
}
}
log.Println("Running go fmt")
charCmd := exec.Command("go", "fmt")
charCmd.Dir = os.ExpandEnv(CharPkgPath)
if err := charCmd.Run(); err != nil {
log.Fatal(err)
}
svcCmd := exec.Command("go", "fmt")
svcCmd.Dir = os.ExpandEnv(SvcPkgPath)
if err := svcCmd.Run(); err != nil {
log.Fatal(err)
}
accCmd := exec.Command("go", "fmt")
accCmd.Dir = os.ExpandEnv(AccPkgPath)
if err := accCmd.Run(); err != nil {
log.Fatal(err)
}
}
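// Run with `go run import.go` (the build tag above keeps this file out of
// normal builds). gen/metadata.json must exist first; see the plutil
// command in the header comment.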
find_reaction_MOD.js
module.exports = {
name: 'Find Reaction',
section: 'Reaction Control',
subtitle(data) {
return `${data.find}`;
},
variableStorage(data, varType) {
if (parseInt(data.storage, 10) !== varType) return;
return [data.varName2, 'Reaction'];
},
fields: ['message', 'varName', 'info', 'find', 'storage', 'varName2'],
html(isEvent, data) {
return `
<div>
<div style="float: left; width: 35%;">
Source Message:<br>
<select id="message" class="round" onchange="glob.messageChange(this, 'varNameContainer')">
${data.messages[isEvent ? 1 : 0]}
</select>
</div>
<div id="varNameContainer" style="display: none; float: right; width: 60%;">
Variable Name:<br>
<input id="varName" class="round" type="text" list="variableList"><br>
</div>
</div><br><br><br><br>
<div>
<div style="float: left; width: 40%;">
Source Emoji:<br>
<select id="info" class="round">
<option value="0" selected>Emoji ID</option>
<option value="1">Emoji Name</option>
</select>
</div>
<div style="float: right; width: 55%;">
Search Value:<br>
<input id="find" class="round" type="text">
</div>
</div><br><br><br><br>
<div style="padding-top: 8px;">
<div style="float: left; width: 35%;">
Store In:<br>
<select id="storage" class="round">
${data.variables[1]}
</select>
</div>
<div id="varNameContainer2" style="float: right; width: 60%;">
Variable Name:<br>
<input id="varName2" class="round" type="text">
      </div>
    </div>`;
  },
  init() {
const { glob, document } = this;
glob.messageChange(document.getElementById('message'), 'varNameContainer');
},
action(cache) {
const data = cache.actions[cache.index];
const message = parseInt(data.message, 10);
const varName = this.evalMessage(data.varName, cache);
const msg = this.getMessage(message, varName, cache);
const info = parseInt(data.info, 10);
const emoji = this.evalMessage(data.find, cache);
let result;
switch (info) {
case 0:
result = msg.reactions.cache.get(emoji);
break;
case 1:
result = msg.reactions.cache.find((r) => r.emoji.name === emoji);
break;
default:
break;
}
if (result === undefined) return this.callNextAction(cache);
const storage = parseInt(data.storage, 10);
const varName2 = this.evalMessage(data.varName2, cache);
result.fetch().then((react) => {
this.storeValue(react, storage, varName2, cache);
this.callNextAction(cache);
});
},
mod() {},
};
lib.rs
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
#![forbid(unsafe_code)]
pub mod access_path;
pub mod account_address;
pub mod account_config;
pub mod account_state_blob;
pub mod block_info;
pub mod block_metadata;
pub mod byte_array;
pub mod contract_event;
pub mod crypto_proxies;
pub mod discovery_set;
pub mod event;
pub mod get_with_proof;
pub mod identifier;
pub mod language_storage;
pub mod ledger_info;
pub mod proof;
#[cfg(any(test, feature = "fuzzing"))]
pub mod proptest_types;
pub mod proto;
#[cfg(any(test, feature = "fuzzing"))]
pub mod test_helpers;
pub mod transaction;
pub mod validator_change;
pub mod validator_public_keys;
pub mod validator_set;
pub mod validator_signer;
pub mod validator_verifier;
pub mod vm_error;
pub mod write_set;
pub use account_address::AccountAddress as PeerId;
#[cfg(test)]
mod unit_tests;
46-es2015.0cf930f673136fcc53f9.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[46],{yRpg:function(t,i,r){"use strict";r.r(i),r.d(i,"ion_chip",function(){return a});var n=r("mLBW"),o=(r("TJLY"),r("Jbqe"));class a{constructor(t){Object(n.m)(this,t),this.outline=!1}hostData(){const t=Object(n.e)(this);return{class:Object.assign({},Object(o.a)(this.color),{[t]:!0,"chip-outline":this.outline,"ion-activatable":!0})}}__stencil_render(){const t=Object(n.e)(this);return[Object(n.i)("slot",null),"md"===t?Object(n.i)("ion-ripple-effect",null):null]}render(){return Object(n.i)(n.a,this.hostData(),this.__stencil_render())}static get style(){return":host{--background:rgba(var(--ion-text-color-rgb,0,0,0),0.12);--color:rgba(var(--ion-text-color-rgb,0,0,0),0.87);border-radius:16px;-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;margin-left:4px;margin-right:4px;margin-top:4px;margin-bottom:4px;padding-left:12px;padding-right:12px;padding-top:7px;padding-bottom:7px;display:-ms-inline-flexbox;display:inline-flex;position:relative;-ms-flex-align:center;align-items:center;height:32px;background:var(--background);color:var(--color);font-family:var(--ion-font-family,inherit);font-size:14px;line-height:1;cursor:pointer;overflow:hidden;vertical-align:middle;-webkit-box-sizing:border-box;box-sizing:border-box}@supports ((-webkit-margin-start:0) or (margin-inline-start:0)) or (-webkit-margin-start:0){:host{margin-left:unset;margin-right:unset;-webkit-margin-start:4px;margin-inline-start:4px;-webkit-margin-end:4px;margin-inline-end:4px;padding-left:unset;padding-right:unset;-webkit-padding-start:12px;padding-inline-start:12px;-webkit-padding-end:12px;padding-inline-end:12px}}:host(.ion-color){background:rgba(var(--ion-color-base-rgb),.08);color:var(--ion-color-shade)}:host(.ion-color:focus){background:rgba(var(--ion-color-base-rgb),.12)}:host(.ion-color.activated){background:rgba(var(--ion-color-base-rgb),.16)}:host(.chip-outline){border-width:1px;border-style:solid;border-color:rgba(0,0,0,.32);background:transparent}:host(.chip-outline.ion-color){border-color:rgba(var(--ion-color-base-rgb),.32)}:host(.chip-outline:not(.ion-color):focus){background:rgba(0,0,0,.04)}:host(.chip-outline.activated:not(.ion-color)){background:rgba(0,0,0,.08)}::slotted(ion-icon){font-size:20px}:host(:not(.ion-color)) ::slotted(ion-icon){color:rgba(0,0,0,.54)}::slotted(ion-icon:first-child){margin-left:-4px;margin-right:8px;margin-top:-4px;margin-bottom:-4px}@supports ((-webkit-margin-start:0) or (margin-inline-start:0)) or (-webkit-margin-start:0){::slotted(ion-icon:first-child){margin-left:unset;margin-right:unset;-webkit-margin-start:-4px;margin-inline-start:-4px;-webkit-margin-end:8px;margin-inline-end:8px}}::slotted(ion-icon:last-child){margin-left:8px;margin-right:-4px;margin-top:-4px;margin-bottom:-4px}@supports ((-webkit-margin-start:0) or (margin-inline-start:0)) or (-webkit-margin-start:0){::slotted(ion-icon:last-child){margin-left:unset;margin-right:unset;-webkit-margin-start:8px;margin-inline-start:8px;-webkit-margin-end:-4px;margin-inline-end:-4px}}::slotted(ion-avatar){width:24px;height:24px}::slotted(ion-avatar:first-child){margin-left:-8px;margin-right:8px;margin-top:-4px;margin-bottom:-4px}@supports ((-webkit-margin-start:0) or (margin-inline-start:0)) or 
(-webkit-margin-start:0){::slotted(ion-avatar:first-child){margin-left:unset;margin-right:unset;-webkit-margin-start:-8px;margin-inline-start:-8px;-webkit-margin-end:8px;margin-inline-end:8px}}::slotted(ion-avatar:last-child){margin-left:8px;margin-right:-8px;margin-top:-4px;margin-bottom:-4px}@supports ((-webkit-margin-start:0) or (margin-inline-start:0)) or (-webkit-margin-start:0){::slotted(ion-avatar:last-child){margin-left:unset;margin-right:unset;-webkit-margin-start:8px;margin-inline-start:8px;-webkit-margin-end:-8px;margin-inline-end:-8px}}:host(:focus){outline:none;--background:rgba(var(--ion-text-color-rgb,0,0,0),0.16)}:host(.activated){--background:rgba(var(--ion-text-color-rgb,0,0,0),0.2)}@media (any-hover:hover){:host(:hover){--background:rgba(var(--ion-text-color-rgb,0,0,0),0.16)}:host(.ion-color:hover){background:rgba(var(--ion-color-base-rgb),.12)}:host(.chip-outline:not(.ion-color):hover){background:rgba(var(--ion-text-color-rgb,0,0,0),.04)}}"}}}}]); |