lib.rs
use boxcars::{NetworkParse, ParserBuilder};
use wasm_bindgen::prelude::*;
fn parse_header(data: &[u8], pretty: bool) -> Result<String, JsValue> {
let replay = ParserBuilder::new(data)
.with_network_parse(NetworkParse::Never)
.on_error_check_crc()
.parse();
replay
.map_err(|e| JsValue::from_str(e.to_string().as_str()))
.and_then(|x| {
let res = if pretty {
serde_json::to_string_pretty(&x)
} else {
serde_json::to_string(&x)
};
res.map_err(|e| JsValue::from_str(e.to_string().as_str()))
})
}
fn parse_network(data: &[u8], pretty: bool) -> Result<Vec<u8>, JsValue> {
let replay = ParserBuilder::new(data)
.with_network_parse(NetworkParse::Always)
.on_error_check_crc()
.parse();
replay
.map_err(|e| JsValue::from_str(e.to_string().as_str()))
.and_then(|x| {
let res = if pretty {
serde_json::to_vec_pretty(&x)
} else {
serde_json::to_vec(&x)
};
res.map_err(|e| JsValue::from_str(e.to_string().as_str()))
})
}
#[wasm_bindgen]
pub fn parse_replay_header(data: &[u8]) -> Result<String, JsValue> {
parse_header(data, false)
}
#[wasm_bindgen]
pub fn parse_replay_header_pretty(data: &[u8]) -> Result<String, JsValue> {
parse_header(data, true)
}
#[wasm_bindgen]
pub fn parse_replay_network(data: &[u8]) -> Result<Vec<u8>, JsValue> {
parse_network(data, false)
}
#[wasm_bindgen]
pub fn parse_replay_network_pretty(data: &[u8]) -> Result<Vec<u8>, JsValue> {
parse_network(data, true)
}
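// Illustrative usage sketch (not part of the original file): once compiled
// with wasm-pack, the exports above could be called from JavaScript roughly
// like this (the package name is assumed):
//
//   const { parse_replay_header } = await import('./pkg/boxcars_wasm.js');
//   const headerJson = parse_replay_header(new Uint8Array(replayBytes));
//   console.log(JSON.parse(headerJson));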
git_test.go
/*
Copyright 2020 WILDCARD SA.
Licensed under the WILDCARD SA License, Version 1.0 (the "License");
WILDCARD SA is registered as a French corporation.
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.w6d.io/licenses/LICENSE-1.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is prohibited.
Created on 21/04/2021
*/
package secrets_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
ci "github.com/w6d-io/ci-operator/api/v1alpha1"
"github.com/w6d-io/ci-operator/internal"
"github.com/w6d-io/ci-operator/internal/k8s/secrets"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
)
var _ = Describe("Git secret", func() {
Context("", func() {
BeforeEach(func() {
})
AfterEach(func() {
})
It("", func() {
var err error
s := &secrets.Secret{
WorkFlowStruct: internal.WorkFlowStruct{
Play: &ci.Play{
ObjectMeta: metav1.ObjectMeta{
Name: "play-git-1",
Namespace: "p6e-cx-11",
},
Spec: ci.PlaySpec{
ProjectID: 11,
PipelineID: 1,
Secret: map[ci.SecretKind]string{
ci.GitToken: "token git",
},
},
},
},
}
By("fail on domain check")
s.Play.Spec.RepoURL = "http://{}"
err = s.GitCreate(ctx, k8sClient, ctrl.Log)
Expect(err).ToNot(Succeed())
Expect(err.Error()).To(ContainSubstring("invalid character"))
s.Play.Spec.RepoURL = "https://github.com"
By("fail controller reference")
s.Scheme = runtime.NewScheme()
err = s.GitCreate(ctx, k8sClient, ctrl.Log)
Expect(err).ToNot(Succeed())
Expect(err.Error()).To(ContainSubstring("no kind is registered for the type"))
By("fail to create ")
s.Scheme = scheme
err = s.GitCreate(ctx, k8sClient, ctrl.Log)
Expect(err).ToNot(Succeed())
Expect(err.Error()).To(Equal(`namespaces "p6e-cx-11" not found`))
By("create namespace")
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "p6e-cx-11",
},
}
Expect(k8sClient.Create(ctx, ns)).To(Succeed())
By("create failed on sa update")
s.Play.Name = "play-test-11-1"
s.Play.UID = "77557df7-3162-46cf-9b3c-d3b9b70a42b8"
err = s.GitCreate(ctx, k8sClient, ctrl.Log)
Expect(err).ToNot(Succeed())
Expect(err.Error()).To(Equal(`serviceaccounts "sa-11-1" not found`))
By("create sa")
sa := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: "sa-11-2",
Namespace: "p6e-cx-11", | }
Expect(k8sClient.Create(ctx, sa)).To(Succeed())
s.Play.Name = "play-test-11-2"
s.Play.UID = "fa567196-8be9-4934-a13f-cebf0a97caed"
s.Play.Spec.PipelineID = 2
Expect(s.GitCreate(ctx, k8sClient, ctrl.Log)).To(Succeed())
})
})
})
main.py
import pandas
from pandas import DataFrame
import building_energy_data as bed
def main():
reporter = bed.BuidingEnergyReporter('file.csv')
# Q1: What was the name of the building that had the largest NumberofFloors?
building_name = reporter.max_number_of_floors()
print('Question 1: Name of the building that had the largest NumberOfFloors', building_name)
# Q2: How many buildings had an ENERGYSTARScore of at least 97?
buildings_with_energyscore = reporter.energy_score_buildings(97)
total_rows = len(buildings_with_energyscore.index)
print('Question 2: There are', total_rows,
'buildings that had an ENERGYSTARScore of at least 97')
# Q3: What is the median of the Site Energy Use Index (the SiteEUI(kBtu/sf) column) among all buildings that used natural gas?
median = reporter.median_SiteEUI_gas()
print('Question 3: The median of the Site Energy Use Index (the SiteEUI(kBtu/sf) column) among all buildings that used natural gas is', median)
# Q4: Within the Ballard neighborhood, which buildings used more electricity than BIOMED FAIRVIEW RESEARCH CENTER in 2018?
electricity = reporter.ballard_building_used_electricity_more_threshold()
print('Question 4: The following buildings used more electricity than BIOMED FAIRVIEW RESEARCH CENTER in 2018 within the Ballard neighborhood: \n', electricity)
# Q5: Which properties have a larger property gross floor area for their buildings greater than 15 football fields (NFL) and are not offices or hospitals?
not_offices_not_hospitals_more_15_NFL = reporter.buildings_floor_area_more_15_NFL()
print('Question 5: The properties that have a larger property gross floor area for their buildings greater than 15 football fields (NFL) and are not offices or hospitals: ',
not_offices_not_hospitals_more_15_NFL)
if __name__ == '__main__':
main()
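# Illustrative sketch (not part of this script): one way the hypothetical
# building_energy_data module could implement max_number_of_floors with
# pandas. The 'BuildingName' column is an assumption; 'NumberofFloors' is
# referenced in Q1 above.
#
#   import pandas as pd
#
#   class BuidingEnergyReporter:
#       def __init__(self, path):
#           self._df = pd.read_csv(path)
#
#       def max_number_of_floors(self):
#           # row index of the building with the most floors
#           idx = self._df['NumberofFloors'].idxmax()
#           return self._df.loc[idx, 'BuildingName']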
main.go
/*
Copyright 2021-2022 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/gravitational/teleport"
"github.com/gravitational/teleport/api/client/proto"
"github.com/gravitational/teleport/api/constants"
"github.com/gravitational/teleport/api/types"
"github.com/gravitational/teleport/lib/auth"
"github.com/gravitational/teleport/lib/auth/authclient"
"github.com/gravitational/teleport/lib/auth/native"
"github.com/gravitational/teleport/lib/client"
"github.com/gravitational/teleport/lib/services"
"github.com/gravitational/teleport/lib/tlsca"
"github.com/gravitational/teleport/lib/utils"
"github.com/gravitational/teleport/tool/tbot/config"
"github.com/gravitational/teleport/tool/tbot/destination"
"github.com/gravitational/teleport/tool/tbot/identity"
"github.com/gravitational/trace"
"github.com/kr/pretty"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
var log = logrus.WithFields(logrus.Fields{
trace.Component: teleport.ComponentTBot,
})
const (
authServerEnvVar = "TELEPORT_AUTH_SERVER"
tokenEnvVar = "TELEPORT_BOT_TOKEN"
)
func main() {
if err := Run(os.Args[1:]); err != nil {
utils.FatalError(err)
trace.DebugReport(err)
}
}
func Run(args []string) error {
var cf config.CLIConf
utils.InitLogger(utils.LoggingForDaemon, logrus.InfoLevel)
app := utils.InitCLIParser("tbot", "tbot: Teleport Machine ID").Interspersed(false)
app.Flag("debug", "Verbose logging to stdout").Short('d').BoolVar(&cf.Debug)
app.Flag("config", "tbot.yaml path").Short('c').StringVar(&cf.ConfigPath)
versionCmd := app.Command("version", "Print the version")
startCmd := app.Command("start", "Starts the renewal bot, writing certificates to the data dir at a set interval.")
startCmd.Flag("auth-server", "Specify the Teleport auth server host").Short('a').Envar(authServerEnvVar).StringVar(&cf.AuthServer)
startCmd.Flag("token", "A bot join token, if attempting to onboard a new bot; used on first connect.").Envar(tokenEnvVar).StringVar(&cf.Token)
startCmd.Flag("ca-pin", "A repeatable auth server CA hash to pin; used on first connect.").StringsVar(&cf.CAPins)
startCmd.Flag("data-dir", "Directory to store internal bot data.").StringVar(&cf.DataDir)
startCmd.Flag("destination-dir", "Directory to write generated certificates").StringVar(&cf.DestinationDir)
startCmd.Flag("certificate-ttl", "TTL of generated certificates").Default("60m").DurationVar(&cf.CertificateTTL)
startCmd.Flag("renewal-interval", "Interval at which certificates are renewed; must be less than the certificate TTL.").DurationVar(&cf.RenewalInterval)
startCmd.Flag("join-method", "Method to use to join the cluster.").Default(config.DefaultJoinMethod).EnumVar(&cf.JoinMethod, "token", "iam")
startCmd.Flag("oneshot", "If set, quit after the first renewal.").BoolVar(&cf.Oneshot)
initCmd := app.Command("init", "Initialize a certificate destination directory for writes from a separate bot user.")
initCmd.Flag("destination-dir", "If NOT using a config file, specify the destination directory.").StringVar(&cf.DestinationDir)
initCmd.Flag("init-dir", "If using a config file and multiple destinations are configured, specify which to initialize.").StringVar(&cf.InitDir)
initCmd.Flag("clean", "If set, remove unexpected files and directories from the destination.").BoolVar(&cf.Clean)
initCmd.Flag("reader-user", "If ACLs are in use, name of the Unix user "+
"that will read from the destination.",
).StringVar(&cf.ReaderUser)
initCmd.Flag("bot-user", "If ACLs are in use, name of the bot Unix user "+
"which should have write access to the destination.",
).StringVar(&cf.BotUser)
initCmd.Flag("owner", "Name of the user:group that will own the "+
"destination. If ACLs are in use, must be different from the reader "+
"user and defaults to nobody:nobody. Otherwise, assumes the current "+
"user.",
).StringVar(&cf.Owner)
configCmd := app.Command("config", "Parse and dump a config file").Hidden()
watchCmd := app.Command("watch", "Watch a destination directory for changes.").Hidden()
command, err := app.Parse(args)
if err != nil {
return trace.Wrap(err)
}
// While in debug mode, send logs to stdout.
if cf.Debug {
utils.InitLogger(utils.LoggingForDaemon, logrus.DebugLevel)
}
botConfig, err := config.FromCLIConf(&cf)
if err != nil {
return trace.Wrap(err)
}
switch command {
case versionCmd.FullCommand():
err = onVersion()
case startCmd.FullCommand():
err = onStart(botConfig)
case configCmd.FullCommand():
err = onConfig(botConfig)
case initCmd.FullCommand():
err = onInit(botConfig, &cf)
case watchCmd.FullCommand():
err = onWatch(botConfig)
default:
// This should only happen when there's a missing switch case above.
err = trace.BadParameter("command %q not configured", command)
}
return err
}
func onVersion() error {
utils.PrintVersion()
return nil
}
func onConfig(botConfig *config.BotConfig) error {
pretty.Println(botConfig)
return nil
}
func onWatch(botConfig *config.BotConfig) error {
return trace.NotImplemented("watch not yet implemented")
}
func onStart(botConfig *config.BotConfig) error {
if botConfig.AuthServer == "" {
return trace.BadParameter("an auth or proxy server must be set via --auth-server or configuration")
}
// First, try to make sure all destinations are usable.
if err := checkDestinations(botConfig); err != nil {
return trace.Wrap(err)
}
// Start by loading the bot's primary destination.
dest, err := botConfig.Storage.GetDestination()
if err != nil {
return trace.Wrap(err, "could not read bot storage destination from config")
}
reloadChan := make(chan struct{})
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go handleSignals(reloadChan, cancel)
configTokenHashBytes := []byte{}
if botConfig.Onboarding != nil && botConfig.Onboarding.Token != "" {
sha := sha256.Sum256([]byte(botConfig.Onboarding.Token))
configTokenHashBytes = []byte(hex.EncodeToString(sha[:]))
}
var authClient auth.ClientI
// First, attempt to load an identity from storage.
ident, err := identity.LoadIdentity(dest, identity.BotKinds()...)
if err == nil && !hasTokenChanged(ident.TokenHashBytes, configTokenHashBytes) {
identStr, err := describeTLSIdentity(ident)
if err != nil {
return trace.Wrap(err)
}
log.Infof("Successfully loaded bot identity, %s", identStr)
if err := checkIdentity(ident); err != nil {
return trace.Wrap(err)
}
if botConfig.Onboarding != nil {
log.Warn("Note: onboarding config ignored as identity was loaded from persistent storage")
}
authClient, err = authenticatedUserClientFromIdentity(ctx, ident, botConfig.AuthServer)
if err != nil {
return trace.Wrap(err)
}
} else {
// If the identity can't be loaded, assume we're starting fresh and
// need to generate our initial identity from a token
if ident != nil {
// If ident is set here, we detected a token change above.
log.Warnf("Detected a token change, will attempt to fetch a new identity.")
}
// TODO: validate that errors from LoadIdentity are sanely typed; we
// actually only want to ignore NotFound errors
// Verify we can write to the destination.
if err := identity.VerifyWrite(dest); err != nil {
return trace.Wrap(err, "Could not write to destination %s, aborting.", dest)
}
// Get first identity
ident, err = getIdentityFromToken(botConfig)
if err != nil {
return trace.Wrap(err)
}
log.Debug("Attempting first connection using initial auth client")
authClient, err = authenticatedUserClientFromIdentity(ctx, ident, botConfig.AuthServer)
if err != nil {
return trace.Wrap(err)
}
// Attempt a request to make sure our client works.
if _, err := authClient.Ping(ctx); err != nil {
return trace.Wrap(err, "unable to communicate with auth server")
}
identStr, err := describeTLSIdentity(ident)
if err != nil {
return trace.Wrap(err)
}
log.Infof("Successfully generated new bot identity, %s", identStr)
log.Debugf("Storing new bot identity to %s", dest)
if err := identity.SaveIdentity(ident, dest, identity.BotKinds()...); err != nil {
return trace.Wrap(err, "unable to save generated identity back to destination")
}
}
watcher, err := authClient.NewWatcher(ctx, types.Watch{
Kinds: []types.WatchKind{{
Kind: types.KindCertAuthority,
}},
})
if err != nil {
return trace.Wrap(err)
}
go watchCARotations(watcher)
defer watcher.Close()
return renewLoop(ctx, botConfig, authClient, ident, reloadChan)
}
func hasTokenChanged(configTokenBytes, identityBytes []byte) bool {
if len(configTokenBytes) == 0 || len(identityBytes) == 0 {
return false
}
return !bytes.Equal(identityBytes, configTokenBytes)
}
// checkDestinations checks all destinations and tries to create any that
// don't already exist.
func checkDestinations(cfg *config.BotConfig) error {
// Note: This is vaguely problematic as we don't recommend that users
// store renewable certs under the same user as end-user certs. That said,
// - if the destination was properly created via tbot init this is a no-op
// - if users intend to follow that advice but miss a step, it should fail
// due to lack of permissions
storage, err := cfg.Storage.GetDestination()
if err != nil {
return trace.Wrap(err)
}
// TODO: consider warning if ownership of all destinations is not expected.
if err := storage.Init(); err != nil {
return trace.Wrap(err)
}
for _, dest := range cfg.Destinations {
destImpl, err := dest.GetDestination()
if err != nil {
return trace.Wrap(err)
}
if err := destImpl.Init(); err != nil {
return trace.Wrap(err)
}
}
return nil
}
// checkIdentity performs basic startup checks on an identity and loudly warns
// end users if it is unlikely to work.
func checkIdentity(ident *identity.Identity) error {
var validAfter time.Time
var validBefore time.Time
if ident.X509Cert != nil {
validAfter = ident.X509Cert.NotBefore
validBefore = ident.X509Cert.NotAfter
} else if ident.SSHCert != nil {
validAfter = time.Unix(int64(ident.SSHCert.ValidAfter), 0)
validBefore = time.Unix(int64(ident.SSHCert.ValidBefore), 0)
} else {
return trace.BadParameter("identity is invalid and contains no certificates")
}
now := time.Now().UTC()
if now.After(validBefore) {
log.Errorf(
"Identity has expired. The renewal is likely to fail. (expires: %s, current time: %s)",
validBefore.Format(time.RFC3339),
now.Format(time.RFC3339),
)
} else if now.Before(validAfter) {
log.Warnf(
"Identity is not yet valid. Confirm that the system time is correct. (valid after: %s, current time: %s)",
validAfter.Format(time.RFC3339),
now.Format(time.RFC3339),
)
}
return nil
}
// handleSignals handles incoming Unix signals.
func handleSignals(reload chan struct{}, cancel context.CancelFunc) {
signals := make(chan os.Signal, 1)
signal.Notify(signals, syscall.SIGINT, syscall.SIGHUP, syscall.SIGUSR1)
for signal := range signals {
switch signal {
case syscall.SIGINT:
log.Info("Received interrupt, cancelling...")
cancel()
return
case syscall.SIGHUP, syscall.SIGUSR1:
log.Info("Received reload signal, reloading...")
reload <- struct{}{}
}
}
}
func watchCARotations(watcher types.Watcher) {
for {
select {
case event := <-watcher.Events():
log.Debugf("CA event: %+v", event)
// TODO: handle CA rotations
case <-watcher.Done():
if err := watcher.Error(); err != nil {
log.WithError(err).Warnf("error watching for CA rotations")
}
return
}
}
}
func getIdentityFromToken(cfg *config.BotConfig) (*identity.Identity, error) {
if cfg.Onboarding == nil {
return nil, trace.BadParameter("onboarding config required via CLI or YAML")
}
if cfg.Onboarding.Token == "" {
return nil, trace.BadParameter("unable to start: no token present")
}
addr, err := utils.ParseAddr(cfg.AuthServer)
if err != nil {
return nil, trace.WrapWithMessage(err, "invalid auth server address %+v", cfg.AuthServer)
}
tlsPrivateKey, sshPublicKey, tlsPublicKey, err := generateKeys()
if err != nil {
return nil, trace.WrapWithMessage(err, "unable to generate new keypairs")
}
log.Info("Attempting to generate new identity from token")
params := auth.RegisterParams{
Token: cfg.Onboarding.Token,
ID: auth.IdentityID{
Role: types.RoleBot,
},
Servers: []utils.NetAddr{*addr},
PublicTLSKey: tlsPublicKey,
PublicSSHKey: sshPublicKey,
CAPins: cfg.Onboarding.CAPins,
CAPath: cfg.Onboarding.CAPath,
GetHostCredentials: client.HostCredentials,
JoinMethod: cfg.Onboarding.JoinMethod,
}
certs, err := auth.Register(params)
if err != nil {
return nil, trace.Wrap(err)
}
sha := sha256.Sum256([]byte(params.Token))
tokenHash := hex.EncodeToString(sha[:])
ident, err := identity.ReadIdentityFromStore(&identity.LoadIdentityParams{
PrivateKeyBytes: tlsPrivateKey,
PublicKeyBytes: sshPublicKey,
TokenHashBytes: []byte(tokenHash),
}, certs, identity.BotKinds()...)
return ident, trace.Wrap(err)
}
func renewIdentityViaAuth(
ctx context.Context,
client auth.ClientI,
currentIdentity *identity.Identity,
cfg *config.BotConfig,
) (*identity.Identity, error) {
// TODO: enforce expiration > renewal period (by what margin?)
// If using the IAM join method we always go through the initial join flow
// and fetch new nonrenewable certs
var joinMethod types.JoinMethod
if cfg.Onboarding != nil {
joinMethod = cfg.Onboarding.JoinMethod
}
switch joinMethod {
case types.JoinMethodIAM:
ident, err := getIdentityFromToken(cfg)
return ident, trace.Wrap(err)
default:
}
// Ask the auth server to generate a new set of certs with a new
// expiration date.
certs, err := client.GenerateUserCerts(ctx, proto.UserCertsRequest{
PublicKey: currentIdentity.PublicKeyBytes,
Username: currentIdentity.X509Cert.Subject.CommonName,
Expires: time.Now().Add(cfg.CertificateTTL),
})
if err != nil {
return nil, trace.Wrap(err)
}
newIdentity, err := identity.ReadIdentityFromStore(
currentIdentity.Params(),
certs,
identity.BotKinds()...,
)
if err != nil {
return nil, trace.Wrap(err)
}
return newIdentity, nil
}
// fetchDefaultRoles requests the bot's own role from the auth server and
// extracts its full list of allowed roles.
func fetchDefaultRoles(ctx context.Context, roleGetter services.RoleGetter, botRole string) ([]string, error) {
role, err := roleGetter.GetRole(ctx, botRole)
if err != nil {
return nil, trace.Wrap(err)
}
conditions := role.GetImpersonateConditions(types.Allow)
return conditions.Roles, nil
}
// describeTLSIdentity writes an informational message about the given identity to
// the log.
func describeTLSIdentity(ident *identity.Identity) (string, error) {
cert := ident.X509Cert
if cert == nil {
return "", trace.BadParameter("attempted to describe TLS identity without TLS credentials")
}
tlsIdent, err := tlsca.FromSubject(cert.Subject, cert.NotAfter)
if err != nil {
return "", trace.Wrap(err, "bot TLS certificate can not be parsed as an identity")
}
var principals []string
for _, principal := range tlsIdent.Principals {
if !strings.HasPrefix(principal, constants.NoLoginPrefix) {
principals = append(principals, principal)
}
}
duration := cert.NotAfter.Sub(cert.NotBefore)
return fmt.Sprintf(
"valid: after=%v, before=%v, duration=%s | kind=tls, renewable=%v, disallow-reissue=%v, roles=%v, principals=%v, generation=%v",
cert.NotBefore.Format(time.RFC3339),
cert.NotAfter.Format(time.RFC3339),
duration,
tlsIdent.Renewable,
tlsIdent.DisallowReissue,
tlsIdent.Groups,
principals,
tlsIdent.Generation,
), nil
}
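// Example of the resulting description line (illustrative values only):
//
//	valid: after=2022-01-01T00:00:00Z, before=2022-01-01T01:00:00Z, duration=1h0m0s | kind=tls, renewable=true, disallow-reissue=false, roles=[bot-example], principals=[], generation=1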
// describeSSHIdentity writes an informational message about the given SSH
// identity to the log.
func describeSSHIdentity(ident *identity.Identity) (string, error) {
cert := ident.SSHCert
if cert == nil {
return "", trace.BadParameter("attempted to describe SSH identity without SSH credentials")
}
renewable := false
if _, ok := cert.Extensions[teleport.CertExtensionRenewable]; ok {
renewable = true
}
disallowReissue := false
if _, ok := cert.Extensions[teleport.CertExtensionDisallowReissue]; ok {
disallowReissue = true
}
var roles []string
if rolesStr, ok := cert.Extensions[teleport.CertExtensionTeleportRoles]; ok {
if actualRoles, err := services.UnmarshalCertRoles(rolesStr); err == nil {
roles = actualRoles
}
}
var principals []string
for _, principal := range cert.ValidPrincipals {
if !strings.HasPrefix(principal, constants.NoLoginPrefix) {
principals = append(principals, principal)
}
}
duration := time.Second * time.Duration(cert.ValidBefore-cert.ValidAfter)
return fmt.Sprintf(
"valid: after=%v, before=%v, duration=%s | kind=ssh, renewable=%v, disallow-reissue=%v, roles=%v, principals=%v",
time.Unix(int64(cert.ValidAfter), 0).Format(time.RFC3339),
time.Unix(int64(cert.ValidBefore), 0).Format(time.RFC3339),
duration,
renewable,
disallowReissue,
roles,
principals,
), nil
}
// renew performs a single renewal
func renew(
ctx context.Context, cfg *config.BotConfig, client auth.ClientI,
ident *identity.Identity, botDestination destination.Destination,
) (auth.ClientI, *identity.Identity, error) {
// Make sure we can still write to the bot's destination.
if err := identity.VerifyWrite(botDestination); err != nil {
return nil, nil, trace.Wrap(err, "Cannot write to destination %s, aborting.", botDestination)
}
log.Debug("Attempting to renew bot certificates...")
newIdentity, err := renewIdentityViaAuth(ctx, client, ident, cfg)
if err != nil {
return nil, nil, trace.Wrap(err)
}
identStr, err := describeTLSIdentity(newIdentity)
if err != nil {
return nil, nil, trace.Wrap(err, "Could not describe bot identity at %s", botDestination)
}
log.Infof("Successfully renewed bot certificates, %s", identStr)
// TODO: warn if duration < certTTL? would indicate TTL > server's max renewable cert TTL
// TODO: error if duration < renewalInterval? next renewal attempt will fail
// Immediately attempt to reconnect using the new identity (still
// haven't persisted the known-good certs).
newClient, err := authenticatedUserClientFromIdentity(ctx, newIdentity, cfg.AuthServer)
if err != nil {
return nil, nil, trace.Wrap(err)
}
// Attempt a request to make sure our client works.
// TODO: consider a retry/backoff loop.
if _, err := newClient.Ping(ctx); err != nil {
return nil, nil, trace.Wrap(err, "unable to communicate with auth server")
}
log.Debug("Auth client now using renewed credentials.")
client = newClient
ident = newIdentity
// Now that we're sure the new creds work, persist them.
if err := identity.SaveIdentity(newIdentity, botDestination, identity.BotKinds()...); err != nil {
return nil, nil, trace.Wrap(err)
}
// Determine the default role list based on the bot role. The role's
// name should match the certificate's Key ID (user and role names
// should all match bot-$name)
botResourceName := ident.X509Cert.Subject.CommonName
defaultRoles, err := fetchDefaultRoles(ctx, client, botResourceName)
if err != nil {
log.WithError(err).Warnf("Unable to determine default roles, no roles will be requested if unspecified")
defaultRoles = []string{}
}
// Next, generate impersonated certs
expires := ident.X509Cert.NotAfter
for _, dest := range cfg.Destinations {
destImpl, err := dest.GetDestination()
if err != nil {
return nil, nil, trace.Wrap(err)
}
// Check the ACLs. We can't fix them, but we can warn if they're
// misconfigured. We'll need to precompute a list of keys to check.
// Note: This may only log a warning, depending on configuration.
if err := destImpl.Verify(identity.ListKeys(dest.Kinds...)); err != nil {
return nil, nil, trace.Wrap(err)
}
// Ensure this destination is also writable. This is a hard fail if
// ACLs are misconfigured, regardless of configuration.
// TODO: consider not making these a hard error? e.g. write other
// destinations even if this one is broken?
if err := identity.VerifyWrite(destImpl); err != nil {
return nil, nil, trace.Wrap(err, "Could not write to destination %s, aborting.", destImpl)
}
var desiredRoles []string
if len(dest.Roles) > 0 {
desiredRoles = dest.Roles
} else {
log.Debugf("Destination specified no roles, defaults will be requested: %v", defaultRoles)
desiredRoles = defaultRoles
}
impersonatedIdent, err := generateImpersonatedIdentity(ctx, client, ident, expires, desiredRoles, dest.Kinds)
if err != nil {
return nil, nil, trace.Wrap(err, "Failed to generate impersonated certs for %s: %+v", destImpl, err)
}
var impersonatedIdentStr string
if dest.ContainsKind(identity.KindTLS) {
impersonatedIdentStr, err = describeTLSIdentity(impersonatedIdent)
if err != nil {
return nil, nil, trace.Wrap(err, "could not describe impersonated certs for destination %s", destImpl)
}
} else {
// Note: kinds must contain at least 1 of TLS or SSH
impersonatedIdentStr, err = describeSSHIdentity(impersonatedIdent)
if err != nil {
return nil, nil, trace.Wrap(err, "could not describe impersonated certs for destination %s", destImpl)
}
}
log.Infof("Successfully renewed impersonated certificates for %s, %s", destImpl, impersonatedIdentStr)
if err := identity.SaveIdentity(impersonatedIdent, destImpl, dest.Kinds...); err != nil {
return nil, nil, trace.Wrap(err, "failed to save impersonated identity to destination %s", destImpl)
}
for _, templateConfig := range dest.Configs {
template, err := templateConfig.GetConfigTemplate()
if err != nil {
return nil, nil, trace.Wrap(err)
}
if err := template.Render(ctx, client, impersonatedIdent, dest); err != nil {
log.WithError(err).Warnf("Failed to render config template %+v", templateConfig)
}
}
}
log.Infof("Persisted new certificates to disk. Next renewal in approximately %s", cfg.RenewalInterval)
return newClient, newIdentity, nil
}
func renewLoop(ctx context.Context, cfg *config.BotConfig, client auth.ClientI, ident *identity.Identity, reloadChan chan struct{}) error {
// TODO: failures here should probably not just end the renewal loop, there
// should be some retry / back-off logic.
// TODO: what should this interval be? should it be user configurable?
// Also, must be < the validity period.
// TODO: validate that cert is actually renewable.
log.Infof("Beginning renewal loop: ttl=%s interval=%s", cfg.CertificateTTL, cfg.RenewalInterval)
if cfg.RenewalInterval > cfg.CertificateTTL {
log.Errorf(
"Certificate TTL (%s) is shorter than the renewal interval (%s). The next renewal is likely to fail.",
cfg.CertificateTTL,
cfg.RenewalInterval,
)
}
// Determine where the bot should write its internal data (renewable cert
// etc)
botDestination, err := cfg.Storage.GetDestination()
if err != nil {
return trace.Wrap(err)
}
ticker := time.NewTicker(cfg.RenewalInterval)
defer ticker.Stop()
for {
newClient, newIdentity, err := renew(ctx, cfg, client, ident, botDestination)
if err != nil {
return trace.Wrap(err)
}
if cfg.Oneshot {
log.Info("Oneshot mode enabled, exiting successfully.")
break
}
client = newClient
ident = newIdentity
select {
case <-ctx.Done():
return nil
case <-ticker.C:
continue
case <-reloadChan:
continue
}
}
return nil
}
// authenticatedUserClientFromIdentity creates a new auth client from the given
// identity. Note that depending on the connection address given, this may
// attempt to connect via the proxy and therefore requires both SSH and TLS
// credentials.
func authenticatedUserClientFromIdentity(ctx context.Context, id *identity.Identity, authServer string) (auth.ClientI, error) {
if id.SSHCert == nil || id.X509Cert == nil {
return nil, trace.BadParameter("auth client requires a fully formed identity")
}
tlsConfig, err := id.TLSConfig(nil /* cipherSuites */)
if err != nil {
return nil, trace.Wrap(err)
}
sshConfig, err := id.SSHClientConfig()
if err != nil {
return nil, trace.Wrap(err)
}
authAddr, err := utils.ParseAddr(authServer)
if err != nil {
return nil, trace.Wrap(err)
}
authClientConfig := &authclient.Config{
TLS: tlsConfig,
SSH: sshConfig,
AuthServers: []utils.NetAddr{*authAddr},
Log: log,
}
c, err := authclient.Connect(ctx, authClientConfig)
return c, trace.Wrap(err)
}
func generateImpersonatedIdentity(
ctx context.Context,
client auth.ClientI,
currentIdentity *identity.Identity,
expires time.Time,
roleRequests []string,
kinds []identity.ArtifactKind,
) (*identity.Identity, error) {
// TODO: enforce expiration > renewal period (by what margin?)
// Generate a fresh keypair for the impersonated identity. We don't care to
// reuse keys here: impersonated certs might not be as well-protected so
// constantly rotating private keys limits the exposure if one is compromised.
privateKey, publicKey, err := native.GenerateKeyPair("")
if err != nil {
return nil, trace.Wrap(err)
}
// First, ask the auth server to generate a new set of certs with a new
// expiration date.
certs, err := client.GenerateUserCerts(ctx, proto.UserCertsRequest{
PublicKey: publicKey,
Username: currentIdentity.X509Cert.Subject.CommonName,
Expires: expires,
RoleRequests: roleRequests,
})
if err != nil {
return nil, trace.Wrap(err)
}
// The root CA included with the returned user certs will only contain the
// Teleport User CA. We'll also need the host CA for future API calls.
localCA, err := client.GetClusterCACert()
if err != nil {
return nil, trace.Wrap(err)
}
caCerts, err := tlsca.ParseCertificatePEMs(localCA.TLSCA)
if err != nil {
return nil, trace.Wrap(err)
}
// Append the host CAs from the auth server.
for _, cert := range caCerts {
pemBytes, err := tlsca.MarshalCertificatePEM(cert)
if err != nil {
return nil, trace.Wrap(err)
}
certs.TLSCACerts = append(certs.TLSCACerts, pemBytes)
}
newIdentity, err := identity.ReadIdentityFromStore(&identity.LoadIdentityParams{
PrivateKeyBytes: privateKey,
PublicKeyBytes: publicKey,
}, certs, kinds...)
if err != nil {
return nil, trace.Wrap(err)
}
return newIdentity, nil
}
func generateKeys() (private, sshpub, tlspub []byte, err error) {
privateKey, publicKey, err := native.GenerateKeyPair("")
if err != nil {
return nil, nil, nil, trace.Wrap(err)
}
sshPrivateKey, err := ssh.ParseRawPrivateKey(privateKey)
if err != nil {
return nil, nil, nil, trace.Wrap(err)
}
tlsPublicKey, err := tlsca.MarshalPublicKeyFromPrivateKeyPEM(sshPrivateKey)
if err != nil {
return nil, nil, nil, trace.Wrap(err)
}
return privateKey, publicKey, tlsPublicKey, nil
}
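// Illustrative invocation sketch (not part of the original file), using the
// flags defined in Run above; all values are placeholders:
//
//	tbot start \
//	    --auth-server auth.example.com:3025 \
//	    --token $TELEPORT_BOT_TOKEN \
//	    --ca-pin sha256:abc... \
//	    --data-dir /var/lib/teleport/bot \
//	    --destination-dir /opt/machine-id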
s3.go
package test
import (
"fmt"
"net"
"testing"
"time"
)
// s3Lock is used to start a maximum of 5 Minio instances for testing.
var s3Lock = make(chan struct{}, 5)
// S3 starts up an S3-compatible object storage using Docker for testing, and returns an object that
// can be queried for connection parameters. When the test finishes it automatically tears down the object storage.
//
// The connection parameters can be fetched from the returned helper object.
func S3(t *testing.T) S3Helper {
s3Lock <- struct{}{}
t.Cleanup(func() {
<-s3Lock
})
accessKey := "test"
secretKey := "testtest"
env := []string{
fmt.Sprintf("MINIO_ROOT_USER=%s", accessKey),
fmt.Sprintf("MINIO_ROOT_PASSWORD=%s", secretKey),
}
t.Log("Starting Minio in a container...")
m := &minio{
cnt: containerFromPull(
t,
"docker.io/minio/minio",
[]string{"server", "/testdata"},
env,
map[string]string{
"9000/tcp": "",
},
),
accessKey: accessKey,
secretKey: secretKey,
t: t,
}
m.wait()
m.t.Logf("Minio is now available at 127.0.0.1:%d.", m.cnt.port("9000/tcp"))
return m
}
// S3Helper gives access to an S3-compatible object storage.
type S3Helper interface {
// URL returns the endpoint for the S3 connection.
URL() string
// AccessKey returns the access key ID that can be used to access the S3 service.
AccessKey() string
// SecretKey returns the secret access key that can be used to access the S3 service.
SecretKey() string
// Region returns the S3 region string to use.
Region() string
// PathStyle returns true if path-style access should be used.
PathStyle() bool
}
type minio struct {
cnt container
accessKey string
secretKey string
t *testing.T
}
func (m *minio) PathStyle() bool {
return true
}
func (m *minio) Region() string {
return "us-east-1"
}
func (m *minio) URL() string {
return fmt.Sprintf("http://127.0.0.1:%d/", m.cnt.port("9000/tcp"))
}
func (m *minio) AccessKey() string {
return m.accessKey
}
func (m *minio) SecretKey() string {
return m.secretKey
}
}
func (m *minio) wait() {
m.t.Log("Waiting for Minio to come up...")
tries := 0
sleepTime := 5
for {
if tries > 30 {
m.t.Fatalf("Minio failed to come up in %d seconds.", sleepTime*30)
}
sock, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", m.cnt.port("9000/tcp")))
time.Sleep(time.Duration(sleepTime) * time.Second)
if err != nil {
tries++
} else {
_ = sock.Close()
return
}
}
}
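// Illustrative sketch (not part of the original file): how a test might
// consume the helper; wiring the parameters into a real S3 client is omitted:
//
//	func TestObjectStorage(t *testing.T) {
//		helper := S3(t)
//		t.Logf("endpoint=%s region=%s path-style=%v access-key=%s",
//			helper.URL(), helper.Region(), helper.PathStyle(), helper.AccessKey())
//	}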
HTMLKeygenElement.js
/**
* @license
* Copyright (c) 2014 The Polymer Project Authors. All rights reserved.
* This code may only be used under the BSD style license found at http://polymer.github.io/LICENSE.txt
* The complete set of authors may be found at http://polymer.github.io/AUTHORS.txt
* The complete set of contributors may be found at http://polymer.github.io/CONTRIBUTORS.txt
* Code distributed by Google as part of the polymer project is also
* subject to an additional IP rights grant found at http://polymer.github.io/PATENTS.txt
*/
suite('HTMLKeygenElement', function() {
// Not implemented in Firefox.
if (!window.HTMLKeygenElement)
return;
test('form', function() {
var form = document.createElement('form');
var keygen = document.createElement('keygen');
form.appendChild(keygen);
assert.equal(keygen.form, form);
});
test('instanceof', function() {
assert.instanceOf(document.createElement('keygen'), HTMLKeygenElement);
});
test('constructor', function() {
assert.equal(HTMLKeygenElement,
document.createElement('keygen').constructor);
});
});
lib.rs
/*
HELLOGAUGES
Copyright (C) 2022 Adrián Romero
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
use wasm_bindgen::prelude::*;
use yew::prelude::*;
mod utils;
use hellogauges::CircularGauge;
use hellogauges::ControlGauge;
use hellogauges::DialGauge;
use hellogauges::LiquidGauge;
use hellogauges::MetroGauge;
use hellogauges::SimpleGauge;
use hellogauges::{Arc, Section};
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
fn use_counter(
init: f64,
min: f64,
max: f64,
) -> (f64, Box<dyn Fn(f64) -> Box<dyn Fn(MouseEvent) -> ()>>) {
let counter_hook = use_state(|| init);
let counter: f64 = *counter_hook;
let inc_counter: Box<dyn Fn(f64) -> Box<dyn Fn(MouseEvent) -> ()>> =
Box::new(move |inc: f64| {
let counter_hook = counter_hook.clone();
Box::new(move |_| {
let value = *counter_hook + inc;
let value = if value > max { max } else { value };
let value = if value < min { min } else { value };
counter_hook.set(value);
})
});
(counter, inc_counter)
}
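// Illustrative note (not part of the original file): use_counter is a small
// custom hook. It returns the current value plus a closure factory, so each
// button can be wired to its own clamped increment, e.g.:
//
//   let (value, inc) = use_counter(0.0, 0.0, 10.0);
//   html! { <button onclick={inc(1.0)}>{ "+1" }</button> }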
#[function_component(App)]
fn app_component() -> Html {
let (counter, inc_counter) = use_counter(-2.0, -10.0, 20.0);
let (power, inc_power) = use_counter(60.0, 0.0, 100.0);
let (usage, inc_usage) = use_counter(50.0, 0.0, 100.0);
let (weight, inc_weight) = use_counter(72.0, 40.0, 120.0);
let (km, inc_km) = use_counter(50.0, 0.0, 120.0);
let (humidity, inc_humidity) = use_counter(60.0, 0.0, 100.0);
html! {
<>
<h1>{"HELLOGAUGES"}</h1>
<div>{"Gauge Components for the Yew framework"}</div>
<div class="gaugegallery">
<div class="gaugecontainer">
<div></div>
<div class="gaugetitle">
<button onclick={inc_counter(-1.0)}>{ "<<" }</button>
{"\u{00a0}CircularGauge\u{00a0}"}
<button onclick={inc_counter(1.0)}>{ ">>" }</button>
</div>
<div class="gaugetitle">
<button onclick={inc_power(-2.0)}>{ "<<" }</button>
{"\u{00a0}SimpleGauge\u{00a0}"}
<button onclick={inc_power(2.0)}>{ ">>" }</button>
</div>
<div class="gaugetitle">
<button onclick={inc_usage(-5.0)}>{ "<<" }</button>
{"\u{00a0}ControlGauge\u{00a0}"}
<button onclick={inc_usage(5.0)}>{ ">>" }</button>
</div>
</div>
<div class="gaugecontainer">
<div class="gaugestyle">{"Default gauges style"}</div>
<div>
<CircularGauge value = { Some(counter) } pattern="°C,1" title = "Temperature" min = {-10.0} max= {20.0} />
</div>
<div>
<SimpleGauge value = { Some(power) } pattern="kW,0" title = "Power" min = {0.0} max= {100.0} />
</div>
<div>
<ControlGauge value = { Some(usage) } pattern="Gb,0" title = "Usage" min = {0.0} max= {100.0} />
</div>
</div>
<div class="gaugecontainer gaugestyled">
<div class="gaugestyle">{"Default gauges style"}</div>
<div>
<CircularGauge value = { Some(counter) } pattern="°C,1" title = "Temperature" min = {-10.0} max= {20.0} >
<Arc start = {-10.0} end = 5.0 style = "stroke: #0000FF30;" />
<Arc start = 5.0 end = 20.0 style = "stroke: #FF000030;" />
</CircularGauge>
</div>
<div>
<SimpleGauge value = { Some(power) } pattern="kW,0" title = "Power" min = {0.0} max= {100.0} >
<Arc start = 0.0 end = 20.0 style = "stroke: green;" />
<Arc start = 20.0 end = 80.0 style = "stroke: lightgray;" />
<Arc start = 80.0 end = 100.0 style = "stroke: red;" />
</SimpleGauge>
</div>
<div>
<ControlGauge value = { Some(usage) } pattern="Gb,0" title = "Usage" min = {0.0} max= {100.0} >
<Arc start = 80.0 end = 100.0 r = 0.8 />
</ControlGauge>
</div>
</div>
<div class="gaugecontainer">
<div></div>
<div class="gaugetitle">
<button onclick={inc_weight(-2.0)}>{ "<<" }</button>
{"\u{00a0}DialGauge\u{00a0}"}
<button onclick={inc_weight(2.0)}>{ ">>" }</button>
</div>
<div class="gaugetitle">
<button onclick={inc_km(-5.0)}>{ "<<" }</button>
{"\u{00a0}MetroGauge\u{00a0}"}
<button onclick={inc_km(5.0)}>{ ">>" }</button>
</div>
<div class="gaugetitle">
<button onclick={inc_humidity(-5.0)}>{ "<<" }</button>
{"\u{00a0}LiquidGauge\u{00a0}"}
<button onclick={inc_humidity(5.0)}>{ ">>" }</button>
</div>
</div>
<div class="gaugecontainer">
<div class="gaugestyle">{"Default gauges style"}</div>
<div>
<DialGauge value = { Some(weight) } pattern="Kg,3" title = "Weight" min = {40.0} max = {120.0} step = 2.0 step_label = 10.0/>
</div>
<div>
<MetroGauge value = { Some(km) } pattern="km/h,0" title = "Speedometer" min = {0.0} max = {120.0} />
</div>
<div>
<LiquidGauge value = { Some(humidity) } pattern="%,0" title = "Humidity" min = {0.0} max = {100.0} />
</div>
</div>
<div class="gaugecontainer gaugestyled">
<div class="gaugestyle">{"Styled gauges"}</div>
<div>
<DialGauge value = { Some(weight) } pattern="Kg,3" title = "Weight" min = {40.0} max = {120.0} step = 2.0 step_label = 10.0>
<Section start = 0.0 end = 10.0 />
<Section start = 10.0 end = 20.0 style = "stroke: red;" />
</DialGauge>
</div>
<div>
<MetroGauge value = { Some(km) } pattern="km/h,0" title = "Speedometer" min = {0.0} max = {120.0} />
</div>
<div>
<LiquidGauge value = { Some(humidity) } pattern="%,0" title = "Humidity" min = {0.0} max = {100.0} />
</div>
</div>
</div>
</>
}
}
#[wasm_bindgen(start)]
pub fn run_app() -> Result<(), JsValue> {
utils::set_panic_hook();
yew::start_app::<App>();
// let root = document()
// .query_selector("#root")
// .expect("can't get #root node for rendering")
// .expect("can't unwrap #root node");
// yew::start_app_in_element::<App>(root);
Ok(())
}
scope_test.go
// The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package queues
import (
"math/rand"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/pborman/uuid"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"golang.org/x/exp/slices"
enumsspb "go.temporal.io/server/api/enums/v1"
"go.temporal.io/server/common/predicates"
"go.temporal.io/server/service/history/tasks"
)
type (
scopeSuite struct {
suite.Suite
*require.Assertions
controller *gomock.Controller
}
)
func TestScopeSuite(t *testing.T) {
s := new(scopeSuite)
suite.Run(t, s)
}
func (s *scopeSuite) SetupTest() {
s.Assertions = require.New(s.T())
s.controller = gomock.NewController(s.T())
}
func (s *scopeSuite) TearDownSuite() {
s.controller.Finish()
}
func (s *scopeSuite) TestContains() {
r := NewRandomRange()
namespaceIDs := []string{uuid.New(), uuid.New(), uuid.New()}
predicate := tasks.NewNamespacePredicate(namespaceIDs)
scope := NewScope(r, predicate)
for _, namespaceID := range namespaceIDs {
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).Times(1)
s.True(scope.Contains(mockTask))
mockTask.EXPECT().GetKey().Return(tasks.NewKey(r.ExclusiveMax.FireTime, r.ExclusiveMax.TaskID+1)).Times(1)
s.False(scope.Contains(mockTask))
}
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(uuid.New()).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).MaxTimes(1)
s.False(scope.Contains(mockTask))
}
func (s *scopeSuite) TestCanSplitByRange() {
r := NewRandomRange()
predicate := predicates.All[tasks.Task]()
scope := NewScope(r, predicate)
s.True(scope.CanSplitByRange(r.InclusiveMin))
s.True(scope.CanSplitByRange(r.ExclusiveMax))
s.True(scope.CanSplitByRange(NewRandomKeyInRange(r)))
s.False(scope.CanSplitByRange(tasks.NewKey(
r.InclusiveMin.FireTime,
r.InclusiveMin.TaskID-1,
)))
s.False(scope.CanSplitByRange(tasks.NewKey(
r.ExclusiveMax.FireTime.Add(time.Nanosecond),
r.ExclusiveMax.TaskID,
)))
}
func (s *scopeSuite) TestSplitByRange() {
r := NewRandomRange()
predicate := predicates.All[tasks.Task]()
scope := NewScope(r, predicate)
splitKey := NewRandomKeyInRange(r)
leftScope, rightScope := scope.SplitByRange(splitKey)
s.Equal(NewRange(r.InclusiveMin, splitKey), leftScope.Range)
s.Equal(NewRange(splitKey, r.ExclusiveMax), rightScope.Range)
s.Equal(predicate, leftScope.Predicate)
s.Equal(predicate, rightScope.Predicate)
}
func (s *scopeSuite) TestSplitByPredicate_SamePredicateType() {
r := NewRandomRange()
namespaceIDs := []string{uuid.New(), uuid.New(), uuid.New(), uuid.New()}
predicate := tasks.NewNamespacePredicate(namespaceIDs)
scope := NewScope(r, predicate)
splitNamespaceIDs := append(slices.Clone(namespaceIDs[:rand.Intn(len(namespaceIDs))]), uuid.New(), uuid.New())
splitPredicate := tasks.NewNamespacePredicate(splitNamespaceIDs)
passScope, failScope := scope.SplitByPredicate(splitPredicate)
s.Equal(r, passScope.Range)
s.Equal(r, failScope.Range)
for _, namespaceID := range splitNamespaceIDs {
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
if slices.Contains(namespaceIDs, namespaceID) {
s.True(passScope.Contains(mockTask))
} else {
s.False(passScope.Contains(mockTask))
}
s.False(failScope.Contains(mockTask))
}
for _, namespaceID := range namespaceIDs {
if slices.Contains(splitNamespaceIDs, namespaceID) {
continue
}
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
s.False(passScope.Contains(mockTask))
s.True(failScope.Contains(mockTask))
}
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(uuid.New()).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
s.False(passScope.Contains(mockTask))
s.False(failScope.Contains(mockTask))
}
func (s *scopeSuite) TestSplitByPredicate_DifferentPredicateType() {
r := NewRandomRange()
namespaceIDs := []string{uuid.New(), uuid.New(), uuid.New(), uuid.New()}
predicate := tasks.NewNamespacePredicate(namespaceIDs)
scope := NewScope(r, predicate)
splitTaskTypes := []enumsspb.TaskType{
enumsspb.TaskType(rand.Intn(10)),
enumsspb.TaskType(rand.Intn(10)),
enumsspb.TaskType(rand.Intn(10)),
enumsspb.TaskType(rand.Intn(10)),
}
splitPredicate := tasks.NewTypePredicate(splitTaskTypes)
passScope, failScope := scope.SplitByPredicate(splitPredicate)
s.Equal(r, passScope.Range)
s.Equal(r, failScope.Range)
for _, namespaceID := range namespaceIDs {
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
for _, typeType := range splitTaskTypes {
mockTask.EXPECT().GetType().Return(typeType).Times(2)
s.True(passScope.Contains(mockTask))
s.False(failScope.Contains(mockTask))
}
mockTask.EXPECT().GetType().Return(enumsspb.TaskType(rand.Intn(10) + 10)).Times(2)
s.False(passScope.Contains(mockTask))
s.True(failScope.Contains(mockTask))
}
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(uuid.New()).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
for _, typeType := range splitTaskTypes {
mockTask.EXPECT().GetType().Return(typeType).MaxTimes(2)
s.False(passScope.Contains(mockTask))
s.False(failScope.Contains(mockTask))
}
mockTask.EXPECT().GetType().Return(enumsspb.TaskType(rand.Intn(10) + 10)).MaxTimes(2)
s.False(passScope.Contains(mockTask))
s.False(failScope.Contains(mockTask))
}
func (s *scopeSuite) TestCanMergeByRange() {
// TODO: add test for validating scope predicate
r := NewRandomRange()
predicate := predicates.All[tasks.Task]()
scope := NewScope(r, predicate)
incomingScope := NewScope(r, predicate)
s.True(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(tasks.MinimumKey, r.InclusiveMin), predicate)
s.True(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(r.ExclusiveMax, tasks.MaximumKey), predicate)
s.True(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(tasks.MinimumKey, NewRandomKeyInRange(r)), predicate)
s.True(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(NewRandomKeyInRange(r), tasks.MaximumKey), predicate)
s.True(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(tasks.MinimumKey, tasks.MaximumKey), predicate)
s.True(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(
tasks.MinimumKey,
tasks.NewKey(r.InclusiveMin.FireTime, r.InclusiveMin.TaskID-1),
), predicate)
s.False(scope.CanMergeByRange(incomingScope))
incomingScope = NewScope(NewRange(
tasks.NewKey(r.ExclusiveMax.FireTime, r.ExclusiveMax.TaskID+1),
tasks.MaximumKey,
), predicate)
s.False(scope.CanMergeByRange(incomingScope))
}
func (s *scopeSuite) TestMergeByRange() {
r := NewRandomRange()
predicate := predicates.All[tasks.Task]()
scope := NewScope(r, predicate)
mergeRange := r
mergedScope := scope.MergeByRange(NewScope(mergeRange, predicate))
s.Equal(predicate, mergedScope.Predicate)
s.Equal(r, mergedScope.Range)
mergeRange = NewRange(tasks.MinimumKey, r.InclusiveMin)
mergedScope = scope.MergeByRange(NewScope(mergeRange, predicate))
s.Equal(predicate, mergedScope.Predicate)
s.Equal(NewRange(tasks.MinimumKey, r.ExclusiveMax), mergedScope.Range)
mergeRange = NewRange(r.ExclusiveMax, tasks.MaximumKey)
mergedScope = scope.MergeByRange(NewScope(mergeRange, predicate))
s.Equal(predicate, mergedScope.Predicate)
s.Equal(NewRange(r.InclusiveMin, tasks.MaximumKey), mergedScope.Range)
mergeRange = NewRange(tasks.MinimumKey, NewRandomKeyInRange(r))
mergedScope = scope.MergeByRange(NewScope(mergeRange, predicate))
s.Equal(predicate, mergedScope.Predicate)
s.Equal(NewRange(tasks.MinimumKey, r.ExclusiveMax), mergedScope.Range)
mergeRange = NewRange(NewRandomKeyInRange(r), tasks.MaximumKey)
mergedScope = scope.MergeByRange(NewScope(mergeRange, predicate))
s.Equal(predicate, mergedScope.Predicate)
s.Equal(NewRange(r.InclusiveMin, tasks.MaximumKey), mergedScope.Range)
mergeRange = NewRange(tasks.MinimumKey, tasks.MaximumKey)
mergedScope = scope.MergeByRange(NewScope(mergeRange, predicate))
s.Equal(predicate, mergedScope.Predicate)
s.Equal(mergeRange, mergedScope.Range)
}
func (s *scopeSuite) TestCanMergeByPredicate() {
r := NewRandomRange()
namespaceIDs := []string{uuid.New(), uuid.New(), uuid.New(), uuid.New()}
predicate := tasks.NewNamespacePredicate(namespaceIDs)
scope := NewScope(r, predicate)
s.True(scope.CanMergeByPredicate(scope))
s.True(scope.CanMergeByPredicate(NewScope(r, predicate)))
s.True(scope.CanMergeByPredicate(NewScope(r, tasks.NewTypePredicate([]enumsspb.TaskType{}))))
s.False(scope.CanMergeByPredicate(NewScope(NewRandomRange(), predicate)))
s.False(scope.CanMergeByPredicate(NewScope(NewRandomRange(), predicates.All[tasks.Task]())))
}
func (s *scopeSuite) TestMergeByPredicate_SamePredicateType() {
r := NewRandomRange()
namespaceIDs := []string{uuid.New(), uuid.New(), uuid.New(), uuid.New()}
predicate := tasks.NewNamespacePredicate(namespaceIDs)
scope := NewScope(r, predicate)
mergeNamespaceIDs := append(slices.Clone(namespaceIDs[:rand.Intn(len(namespaceIDs))]), uuid.New(), uuid.New())
mergePredicate := tasks.NewNamespacePredicate(mergeNamespaceIDs)
mergedScope := scope.MergeByPredicate(NewScope(r, mergePredicate))
s.Equal(r, mergedScope.Range)
for _, namespaceID := range namespaceIDs {
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).MaxTimes(2)
s.True(mergedScope.Contains(mockTask))
}
for _, namespaceID := range mergeNamespaceIDs {
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).MaxTimes(2)
s.True(mergedScope.Contains(mockTask))
}
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
mockTask.EXPECT().GetNamespaceID().Return(uuid.New()).Times(2)
s.False(mergedScope.Contains(mockTask))
}
func (s *scopeSuite) TestMergeByPredicate_DifferentPredicateType() {
r := NewRandomRange()
namespaceIDs := []string{uuid.New(), uuid.New(), uuid.New(), uuid.New()}
predicate := tasks.NewNamespacePredicate(namespaceIDs)
scope := NewScope(r, predicate)
mergeTaskTypes := []enumsspb.TaskType{
enumsspb.TaskType(rand.Intn(10)),
enumsspb.TaskType(rand.Intn(10)),
enumsspb.TaskType(rand.Intn(10)),
enumsspb.TaskType(rand.Intn(10)),
}
mergePredicate := tasks.NewTypePredicate(mergeTaskTypes)
mergedScope := scope.MergeByPredicate(NewScope(r, mergePredicate))
s.Equal(r, mergedScope.Range)
for _, namespaceID := range namespaceIDs {
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(namespaceID).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
for _, typeType := range mergeTaskTypes {
mockTask.EXPECT().GetType().Return(typeType).MaxTimes(1)
s.True(mergedScope.Contains(mockTask))
}
mockTask.EXPECT().GetType().Return(enumsspb.TaskType(rand.Intn(10) + 10)).MaxTimes(1)
s.True(mergedScope.Contains(mockTask))
}
mockTask := tasks.NewMockTask(s.controller)
mockTask.EXPECT().GetNamespaceID().Return(uuid.New()).AnyTimes()
mockTask.EXPECT().GetKey().Return(NewRandomKeyInRange(r)).AnyTimes()
for _, typeType := range mergeTaskTypes {
mockTask.EXPECT().GetType().Return(typeType).MaxTimes(1)
s.True(mergedScope.Contains(mockTask))
}
mockTask.EXPECT().GetType().Return(enumsspb.TaskType(rand.Intn(10) + 10)).Times(1)
s.False(mergedScope.Contains(mockTask))
}
common.py
'''
This is an extended unittest module for Kivy, for making unit tests based on
graphics with an OpenGL context.
The idea is to render a Widget tree, and after 1, 2 or more frames, a
screenshot will be taken and compared to the reference one.
If no screenshot exists for the current test, the very first one will be used.
The screenshots live in the 'kivy/tests/results' folder and are in PNG format,
320x240 pixels.
'''
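# Illustrative sketch (not part of the original module): a minimal test built
# on GraphicUnitTest, which is defined below. The Button widget is just an
# example.
#
#   class ButtonRenderTestCase(GraphicUnitTest):
#       def test_button(self):
#           from kivy.uix.button import Button
#           # render the widget tree; the screenshot is taken after 2 frames
#           self.render(Button(text='hello'), framecount=2)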
__all__ = ('GraphicUnitTest', 'UnitTestTouch', 'UTMotionEvent', 'async_run')
import unittest
import logging
import pytest
import sys
import os
import threading
from kivy.graphics.cgl import cgl_get_backend_name
from kivy.input.motionevent import MotionEvent
log = logging.getLogger('unittest')
_base = object
if 'mock' != cgl_get_backend_name():
# check what the gl backend might be, we can't know for sure
# what it'll be until it's actually initialized by the window.
_base = unittest.TestCase
make_screenshots = os.environ.get('KIVY_UNITTEST_SCREENSHOTS')
http_server = None
http_server_ready = threading.Event()
kivy_eventloop = os.environ.get('KIVY_EVENTLOOP', 'asyncio')
def ensure_web_server():
if http_server is not None:
return True
def _start_web_server():
global http_server
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from SocketServer import TCPServer
except ImportError:
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
try:
handler = SimpleHTTPRequestHandler
handler.directory = os.path.join(
os.path.dirname(__file__), "..", "..")
http_server = TCPServer(
("", 8000), handler, bind_and_activate=False)
http_server.daemon_threads = True
http_server.allow_reuse_address = True
http_server.server_bind()
http_server.server_activate()
http_server_ready.set()
http_server.serve_forever()
except:
import traceback
traceback.print_exc()
finally:
http_server = None
http_server_ready.set()
th = threading.Thread(target=_start_web_server)
th.daemon = True
th.start()
http_server_ready.wait()
if http_server is None:
raise Exception("Unable to start webserver")
class GraphicUnitTest(_base):
framecount = 0
def _force_refresh(self, *largs):
# this prevents getting stuck in some cases where the screen doesn't
# refresh and we would wait on a self.framecount that never goes down
from kivy.base import EventLoop
win = EventLoop.window
if win and win.canvas:
win.canvas.ask_update()
def render(self, root, framecount=1):
'''Call rendering process using the `root` widget.
The screenshot will be done in `framecount` frames.
'''
from kivy.base import runTouchApp
from kivy.clock import Clock
self.framecount = framecount
try:
Clock.schedule_interval(self._force_refresh, 1)
runTouchApp(root)
finally:
Clock.unschedule(self._force_refresh)
# reset for the next test, but nobody will know if it will be used :/
if self.test_counter != 0:
self.tearDown(fake=True)
self.setUp()
def run(self, *args, **kwargs):
'''Extend the run of unittest to check that the results directory
exists. If it doesn't, it is created (when screenshots are enabled).
'''
from os.path import join, dirname, exists
results_dir = join(dirname(__file__), 'results')
if make_screenshots and not exists(results_dir):
log.warning('No results directory found, creating it.')
os.mkdir(results_dir)
self.test_counter = 0
self.results_dir = results_dir
self.test_failed = False
return super(GraphicUnitTest, self).run(*args, **kwargs)
def setUp(self):
'''Prepare the graphic test, with:
- Window size fixed to 320x240
- Default kivy configuration
- Without any kivy input
'''
# use default kivy configuration (don't load user file.)
from os import environ
environ['KIVY_USE_DEFAULTCONFIG'] = '1'
# force window size + remove all inputs
from kivy.config import Config
Config.set('graphics', 'width', '320')
Config.set('graphics', 'height', '240')
for items in Config.items('input'):
Config.remove_option('input', items[0])
# bind ourself for the later screenshot
from kivy.core.window import Window
self.Window = Window
Window.bind(on_flip=self.on_window_flip)
# ensure our window is correctly created
Window.create_window()
Window.register()
Window.initialized = True
Window.canvas.clear()
Window.close = lambda *s: True
def on_window_flip(self, window):
'''Internal method to be called when the window has just displayed an
image.
Each time an image is shown, we decrement our framecount. When framecount
reaches 0, we take the screenshot.
The screenshot is written to a temporary location and compared to the
reference one -> test ok/ko.
If no screenshot is available in the results directory, a new one will
be created.
'''
from kivy.base import EventLoop
from tempfile import mkstemp
from os.path import join, exists
from os import unlink, close
from shutil import move, copy
# don't save screenshot until we have enough frames.
# log.debug('framecount %d' % self.framecount)
# ! check if there is 'framecount', otherwise just
# ! assume zero e.g. if handling runTouchApp manually
self.framecount = getattr(self, 'framecount', 0) - 1
if self.framecount > 0:
return
# don't create screenshots if not requested manually
if not make_screenshots:
EventLoop.stop()
return
reffn = None
match = False
try:
# just get a temporary name
fd, tmpfn = mkstemp(suffix='.png', prefix='kivyunit-')
close(fd)
unlink(tmpfn)
# get a filename for the current unit test
self.test_counter += 1
test_uid = '%s-%d.png' % (
'_'.join(self.id().split('.')[-2:]),
self.test_counter)
# capture the screen
log.info('Capturing screenshot for %s' % test_uid)
tmpfn = self.Window.screenshot(tmpfn)
log.info('Capture saved at %s' % tmpfn)
# search the file to compare to
reffn = join(self.results_dir, test_uid)
log.info('Compare with %s' % reffn)
# get sourcecode
import inspect
frame = inspect.getouterframes(inspect.currentframe())[6]
sourcecodetab, line = inspect.getsourcelines(frame[0])
line = frame[2] - line
currentline = sourcecodetab[line]
sourcecodetab[line] = '<span style="color: red;">%s</span>' % (
currentline)
sourcecode = ''.join(sourcecodetab)
sourcecodetab[line] = '>>>>>>>>\n%s<<<<<<<<\n' % currentline
sourcecodeask = ''.join(sourcecodetab)
if not exists(reffn):
log.info('No reference image, use %s as the reference?' % test_uid)
if self.interactive_ask_ref(sourcecodeask, tmpfn, self.id()):
move(tmpfn, reffn)
tmpfn = reffn
log.info('Image used as reference')
match = True
else:
log.info('Image discarded')
else:
from kivy.core.image import Image as CoreImage
s1 = CoreImage(tmpfn, keep_data=True)
sd1 = s1.image._data[0].data
s2 = CoreImage(reffn, keep_data=True)
sd2 = s2.image._data[0].data
if sd1 != sd2:
log.critical(
'%s at render() #%d, images are different.' % (
self.id(), self.test_counter))
if self.interactive_ask_diff(sourcecodeask,
tmpfn, reffn, self.id()):
log.critical('user ask to use it as ref.')
move(tmpfn, reffn)
tmpfn = reffn
match = True
else:
self.test_failed = True
else:
match = True
# generate html
from os.path import join, dirname, exists, basename
from os import mkdir
build_dir = join(dirname(__file__), 'build')
if not exists(build_dir):
mkdir(build_dir)
copy(reffn, join(build_dir, 'ref_%s' % basename(reffn)))
if tmpfn != reffn:
copy(tmpfn, join(build_dir, 'test_%s' % basename(reffn)))
with open(join(build_dir, 'index.html'), 'at') as fd:
color = '#ffdddd' if not match else '#ffffff'
fd.write('<div style="background-color: %s">' % color)
fd.write('<h2>%s #%d</h2>' % (self.id(), self.test_counter))
fd.write('<table><tr><th>Reference</th>'
'<th>Test</th>'
'<th>Comment</th>')
fd.write('<tr><td><img src="ref_%s"/></td>' %
basename(reffn))
if tmpfn != reffn:
fd.write('<td><img src="test_%s"/></td>' %
basename(reffn))
else:
fd.write('<td>First time, no comparison.</td>')
fd.write('<td><pre>%s</pre></td>' % sourcecode)
fd.write('</table></div>')
finally:
try:
if reffn != tmpfn:
unlink(tmpfn)
except:
pass
EventLoop.stop()
def tearDown(self, fake=False):
'''When the test is finished, stop the application, and unbind our
current flip callback.
'''
from kivy.base import stopTouchApp
from kivy.core.window import Window
Window.unbind(on_flip=self.on_window_flip)
stopTouchApp()
if not fake and self.test_failed:
self.fail('Graphic comparison with the reference image failed.')
super(GraphicUnitTest, self).tearDown()
def interactive_ask_ref(self, code, imagefn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return True
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
image = Image.open(imagefn)
photo = ImageTk.PhotoImage(image)
Label(root, text='The test %s\nhas no reference.' % testid).pack()
Label(root, text='Use this image as a reference?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=photo).pack(side=LEFT)
Button(root, text='Use as reference', command=do_yes).pack(side=BOTTOM)
Button(root, text='Discard', command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def interactive_ask_diff(self, code, tmpfn, reffn, testid):
from os import environ
if 'UNITTEST_INTERACTIVE' not in environ:
return False
from tkinter import Tk, Label, LEFT, RIGHT, BOTTOM, Button
from PIL import Image, ImageTk
self.retval = False
root = Tk()
def do_close():
root.destroy()
def do_yes():
self.retval = True
do_close()
phototmp = ImageTk.PhotoImage(Image.open(tmpfn))
photoref = ImageTk.PhotoImage(Image.open(reffn))
Label(root, text='The test %s\nhas generated a different '
'image than the reference one.' % testid).pack()
Label(root, text='Which one is good ?').pack()
Label(root, text=code, justify=LEFT).pack(side=RIGHT)
Label(root, image=phototmp).pack(side=RIGHT)
Label(root, image=photoref).pack(side=LEFT)
Button(root, text='Use the new image -->',
command=do_yes).pack(side=BOTTOM)
Button(root, text='<-- Use the reference',
command=do_close).pack(side=BOTTOM)
root.mainloop()
return self.retval
def advance_frames(self, count):
'''Render the new frames and:
* tick the Clock
* dispatch input from all registered providers
* flush all the canvas operations
* redraw Window canvas if necessary
'''
from kivy.base import EventLoop
for i in range(count):
EventLoop.idle()
class UnitTestTouch(MotionEvent):
'''Custom MotionEvent representing a single touch. Similar to `on_touch_*`
methods from the Widget class, this one introduces:
* touch_down
* touch_move
* touch_up
Create a new touch with::
touch = UnitTestTouch(x, y)
then you press it on the default position with::
touch.touch_down()
or move it or even release with these simple calls::
touch.touch_move(new_x, new_y)
touch.touch_up()
'''
def __init__(self, x, y):
'''Create a MotionEvent instance with X and Y of the first
position a touch is at.
'''
from kivy.base import EventLoop
self.eventloop = EventLoop
win = EventLoop.window
super(UnitTestTouch, self).__init__(
# device, (tuio) id, args
self.__class__.__name__, 99, {
"x": x / float(win.width),
"y": y / float(win.height),
}
)
def touch_down(self, *args):
self.eventloop.post_dispatch_input("begin", self)
def touch_move(self, x, y):
win = self.eventloop.window
self.move({
"x": x / float(win.width),
"y": y / float(win.height)
})
self.eventloop.post_dispatch_input("update", self)
def touch_up(self, *args):
self.eventloop.post_dispatch_input("end", self)
def depack(self, args):
# set MotionEvent to touch
|
class UTMotionEvent(MotionEvent):
def depack(self, args):
self.is_touch = True
self.sx = args['x']
self.sy = args['y']
self.profile = ['pos']
super(UTMotionEvent, self).depack(args)
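# Usage sketch for the async_run decorator defined below (the test body and
# app factory are hypothetical; `kivy_app` is the fixture name targeted by the
# parametrize call):
#
#     @async_run(app_cls_func=create_test_app)
#     async def test_my_app(kivy_app):
#         ...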
def async_run(func=None, app_cls_func=None):
def inner_func(func):
if 'mock' == cgl_get_backend_name():
return pytest.mark.skip(
reason='Skipping because gl backend is set to mock')(func)
if sys.version_info < (3, 6):
return pytest.mark.skip(
reason='Skipping because graphics tests are not supported on '
'py3.5, only on py3.6+')(func)
if app_cls_func is not None:
func = pytest.mark.parametrize(
"kivy_app", [[app_cls_func], ], indirect=True)(func)
if kivy_eventloop == 'asyncio':
try:
import pytest_asyncio
return pytest.mark.asyncio(func)
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "asyncio" but '
'"pytest-asyncio" is not installed')(func)
elif kivy_eventloop == 'trio':
try:
import trio
from pytest_trio import trio_fixture
func._force_trio_fixture = True
return func
except ImportError:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP == "trio" but '
'"pytest-trio" is not installed')(func)
else:
return pytest.mark.skip(
reason='KIVY_EVENTLOOP must be set to either of "asyncio" or '
'"trio" to run async tests')(func)
if func is None:
return inner_func
return inner_func(func)
| self.is_touch = True
# set sx/sy properties to ratio (e.g. X / win.width)
self.sx = args['x']
self.sy = args['y']
# set profile to accept x, y and pos properties
self.profile = ['pos']
# run depack after we set the values
super(UnitTestTouch, self).depack(args) |
check_english.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from re import sub
from typing import Any, List, Text
from functools import reduce
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message
from rasa.nlu.training_data import TrainingData
import string
class CheckEnglish(Component):
| provides = ["text"]
alphabet = ['a', 'b', 'c', 'd']
stopwords = ["a","about","above","after","again","against","ain","all","am","an","and","any","are","aren","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can","couldn","couldn't","d","did","didn","didn't","do","does","doesn","doesn't","doing","don","don't","down","during","each","few","for","from","further","had","hadn","hadn't","has","hasn","hasn't","have","haven","haven't","having","he","her","here","hers","herself","him","himself","his","how","i","if","in","into","is","isn","isn't","it","it's","its","itself","just","ll","m","ma","me","mightn","mightn't","more","most","mustn","mustn't","my","myself","needn","needn't","no","nor","not","now","o","of","off","on","once","only","or","other","our","ours","ourselves","out","over","own","re","s","same","shan","shan't","she","she's","should","should've","shouldn","shouldn't","so","some","such","t","than","that","that'll","the","their","theirs","them","themselves","then","there","these","they","this","those","through","to","too","under","until","up","ve","very","was","wasn","wasn't","we","were","weren","weren't","what","when","where","which","while","who","whom","why","will","with","won","won't","wouldn","wouldn't","y","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves","could","he'd","he'll","he's","here's","how's","i'd","i'll","i'm","i've","let's","ought","she'd","she'll","that's","there's","they'd","they'll","they're","they've","we'd","we'll","we're","we've","what's","when's","where's","who's","why's","would","able","abst","accordance","according","accordingly","across","act","actually","added","adj","affected","affecting","affects","afterwards","ah","almost","alone","along","already","also","although","always","among","amongst","announce","another","anybody","anyhow","anymore","anyone","anything","anyway","anyways","anywhere","apparently","approximately","arent","arise","around","aside","ask","asking","auth","available","away","awfully","b","back","became","become","becomes","becoming","beforehand","begin","beginning","beginnings","begins","behind","believe","beside","besides","beyond","biol","brief","briefly","c","ca","came","cannot","can't","cause","causes","certain","certainly","co","com","come","comes","contain","containing","contains","couldnt","date","different","done","downwards","due","e","ed","edu","effect","eg","eight","eighty","either","else","elsewhere","end","ending","enough","especially","et","etc","even","ever","every","everybody","everyone","everything","everywhere","ex","except","f","far","ff","fifth","first","five","fix","followed","following","follows","former","formerly","forth","found","four","furthermore","g","gave","get","gets","getting","give","given","gives","giving","go","goes","gone","got","gotten","h","happens","hardly","hed","hence","hereafter","hereby","herein","heres","hereupon","hes","hi","hid","hither","home","howbeit","however","hundred","id","ie","im","immediate","immediately","importance","important","inc","indeed","index","information","instead","invention","inward","itd","it'll","j","k","keep","keeps","kept","kg","km","know","known","knows","l","largely","last","lately","later","latter","latterly","least","less","lest","let","lets","like","liked","likely","line","little","'ll","look","looking","looks","ltd","made","mainly","make","makes","many","may","maybe","mean","means","meantime","meanwhile","merely","mg","might","million","miss","ml","moreover","mostly","mr","mrs","much","mug","must","n","na","name","namely","nay","nd","near","nearly","necessarily","necessary","need","needs","neither","never","nevertheless","new","next","nine","ninety","nobody","non","none","nonetheless","noone","normally","nos","noted","nothing","nowhere","obtain","obtained","obviously","often","oh","ok","okay","old","omitted","one","ones","onto","ord","others","otherwise","outside","overall","owing","p","page","pages","part","particular","particularly","past","per","perhaps","placed","please","plus","poorly","possible","possibly","potentially","pp","predominantly","present","previously","primarily","probably","promptly","proud","provides","put","q","que","quickly","quite","qv","r","ran","rather","rd","readily","really","recent","recently","ref","refs","regarding","regardless","regards","related","relatively","research","respectively","resulted","resulting","results","right","run","said","saw","say","saying","says","sec","section","see","seeing","seem","seemed","seeming","seems","seen","self","selves","sent","seven","several","shall","shed","shes","show","showed","shown","showns","shows","significant","significantly","similar","similarly","since","six","slightly","somebody","somehow","someone","somethan","something","sometime","sometimes","somewhat","somewhere","soon","sorry","specifically","specified","specify","specifying","still","stop","strongly","sub","substantially","successfully","sufficiently","suggest","sup","sure","take","taken","taking","tell","tends","th","thank","thanks","thanx","thats","that've","thence","thereafter","thereby","thered","therefore","therein","there'll","thereof","therere","theres","thereto","thereupon","there've","theyd","theyre","think","thou","though","thoughh","thousand","throug","throughout","thru","thus","til","tip","together","took","toward","towards","tried","tries","truly","try","trying","ts","twice","two","u","un","unfortunately","unless","unlike","unlikely","unto","upon","ups","us","use","used","useful","usefully","usefulness","uses","using","usually","v","value","various","'ve","via","viz","vol","vols","vs","w","want","wants","wasnt","way","wed","welcome","went","werent","whatever","what'll","whats","whence","whenever","whereafter","whereas","whereby","wherein","wheres","whereupon","wherever","whether","whim","whither","whod","whoever","whole","who'll","whomever","whos","whose","widely","willing","wish","within","without","wont","words","world","wouldnt","www","x","yes","yet","youd","youre","z","zero","a's","ain't","allow","allows","apart","appear","appreciate","appropriate","associated","best","better","c'mon","c's","cant","changes","clearly","concerning","consequently","consider","considering","corresponding","course","currently","definitely","described","despite","entirely","exactly","example","going","greetings","hello","help","hopefully","ignored","inasmuch","indicate","indicated","indicates","inner","insofar","it'd","keep","keeps","novel","presumably","reasonably","second","secondly","sensible","serious","seriously","sure","t's","third","thorough","thoroughly","three","well","wonder"]
def train(self, training_data, config, **kwargs):
# type: (TrainingData, RasaNLUModelConfig, **Any) -> None
for example in training_data.training_examples:
example.text = self.preprocess(example.text)
example.set("text", example.text)
def process(self, message, **kwargs):
# type: (Message, **Any) -> None
message.text = self.preprocess(message.get('text'))
message.set("text", message.text)
def english_word_count(self, word):
alph = list(string.ascii_lowercase)
count = 0
for ch in word:
if ch in alph:
count += 1
return count
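# Worked example of the filtering below (illustrative input): for
# "the resume cafe2020 hola", "the" is dropped as a stopword, "resume" and
# "hola" are kept (every character is an ASCII letter), and "cafe2020" is
# dropped because only 4 of its 8 characters are letters (0.5 <= 0.6).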
def preprocess(self, text):
text = text.lower()
new_text = ''
for word in text.split():
count = self.english_word_count(word)
if word in self.stopwords:
continue
if count / len(word) > 0.6:
new_text += word + ' '
return new_text[:-1] |
|
types.rs | extern crate proc_macro2;
extern crate proc_quote;
use std::borrow::Cow;
use proc_macro2::{Ident, Span};
use proc_quote::quote;
#[test]
fn test_integer() {
let ii8 = -1i8;
let ii16 = -1i16;
let ii32 = -1i32;
let ii64 = -1i64;
let iisize = -1isize;
let uu8 = 1u8;
let uu16 = 1u16;
let uu32 = 1u32;
let uu64 = 1u64;
let uusize = 1usize;
let tokens = quote! {
#ii8 #ii16 #ii32 #ii64 #iisize
#uu8 #uu16 #uu32 #uu64 #uusize
};
let expected = "- 1i8 - 1i16 - 1i32 - 1i64 - 1isize 1u8 1u16 1u32 1u64 1usize";
assert_eq!(expected, tokens.to_string());
}
#[test]
fn test_floating() {
let e32 = 2.345f32;
let e64 = 2.345f64;
let tokens = quote! {
#e32
#e64
};
let expected = "2.345f32 2.345f64";
assert_eq!(expected, tokens.to_string()); |
#[cfg(integer128)]
#[test]
fn test_integer128() {
let ii128 = -1i128;
let uu128 = 1u128;
let tokens = quote! {
#ii128 #uu128
};
let expected = "-1i128 1u128";
assert_eq!(expected, tokens.to_string());
}
#[test]
fn test_char() {
let zero = '\0';
let pound = '#';
let quote = '"';
let apost = '\'';
let newline = '\n';
// ISSUE #19 https://github.com/Goncalerta/proc-quote/issues/19
//let heart = '\u{2764}';
let tokens = quote! {
#zero #pound #quote #apost #newline
};
let expected = r#"'\u{0}' '#' '"' '\'' '\n'"#;
assert_eq!(expected, tokens.to_string());
}
#[test]
fn test_str() {
let s = "\0 a 'b \" c";
let tokens = quote!(#s);
let expected = r#""\u{0} a 'b \" c""#;
assert_eq!(expected, tokens.to_string());
}
#[test]
fn test_string() {
let s = "\0 a 'b \" c".to_string();
let tokens = quote!(#s);
let expected = r#""\u{0} a 'b \" c""#;
assert_eq!(expected, tokens.to_string());
}
#[test]
fn test_box_str() {
let b = "str".to_owned().into_boxed_str();
let tokens = quote!( #b );
assert_eq!("\"str\"", tokens.to_string());
}
#[test]
fn test_cow() {
let owned: Cow<Ident> = Cow::Owned(Ident::new("owned", Span::call_site()));
let ident = Ident::new("borrowed", Span::call_site());
let borrowed = Cow::Borrowed(&ident);
let tokens = quote!( #owned #borrowed );
assert_eq!("owned borrowed", tokens.to_string());
} | } |
grpc_handler.go | // Package merklesrv contains implementations for merkle server API.
package merklesrv
import (
"context"
"encoding/json"
"errors"
"fmt"
"strconv"
"github.com/ucbrise/MerkleSquare/grpcint"
"github.com/immesys/bw2/crypto"
)
func (s *Server) Register(ctx context.Context, req *grpcint.RegisterRequest) (
*grpcint.RegisterResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
position, err := s.RegisterUserKey(ctx, req.GetUsr().GetUsername(),
req.GetKey().GetMk(), req.GetSignature(), true)
if err != nil {
return nil, err
}
return &grpcint.RegisterResponse{
Pos: &grpcint.Position{Pos: position},
VrfKey: s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
}, nil
}
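// Append implements the streaming append protocol: the client first sends its
// username and new encryption key; the server reserves a position and streams
// it back; the client replies with a signature (under its master key) over
// key||position; the server verifies the signature, appends the entry to the
// Merkle tree, and persists it to the key-value store.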
func (s *Server) Append(stream grpcint.MerkleSquare_AppendServer) error {
req, err := stream.Recv()
if err != nil {
return err
}
ctx := context.Background()
user, key := req.GetUsr().GetUsername(), req.GetEk().GetEk()
mkSerialized, _ := s.Storage.Get(ctx, append(user, []byte("MK")...))
queryString := append(user, []byte("PK")...)
if mkSerialized == nil {
return errors.New("User is not registered")
}
var mk KeyRecord
err = json.Unmarshal(mkSerialized, &mk)
if err != nil {
return err
}
s.LastPosLock.Lock()
s.appendLock.Lock()
position := s.LastPos
s.LastPos++
// Send position
var response = &grpcint.AppendResponse{
Pos: &grpcint.Position{Pos: position},
}
stream.Send(response)
req, err = stream.Recv()
if err != nil {
return err
}
signature := req.GetSignature()
// Verify the client's signature over key||position
if !crypto.VerifyBlob(mk.Key, signature,
append(key, []byte(strconv.Itoa(int(position)))...)) {
return errors.New("Verification failed")
}
// Add to Merkle tree
s.MerkleSquare.Append(s.vrfPrivKey.Compute(user), key, signature)
s.appendLock.Unlock()
s.LastPosLock.Unlock()
// Add to the key-value store
var serializedKey []byte
// Prepend to existing entry
original, _ := s.Storage.Get(ctx, queryString)
keyrecord := make([]KeyRecord, 1)
keyrecord[0] = KeyRecord{
Position: position,
Signature: signature,
Key: key,
}
if original == nil {
serializedKey, _ = json.Marshal(keyrecord)
} else {
var deserialized []KeyRecord
json.Unmarshal(original, &deserialized)
serializedKey, _ = json.Marshal(append(keyrecord, deserialized...))
}
s.Storage.Put(ctx, queryString, serializedKey)
response.VrfKey = s.vrfPrivKey.Compute(req.GetUsr().GetUsername())
response.Completed = true
stream.Send(response)
return nil
}
func (s *Server) LookUpMK(ctx context.Context, req *grpcint.LookUpMKRequest) (
*grpcint.LookUpMKResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
key, sign, pos, err := s.GetUserKey(ctx, req.GetUsr().GetUsername(), true, 0)
if err != nil {
return nil, err
}
return &grpcint.LookUpMKResponse{
Imk: &grpcint.IndexedMK{
Pos: &grpcint.Position{Pos: pos},
MasterKey: &grpcint.MasterKey{Mk: key},
},
Signature: sign,
VrfKey: s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
}, nil
}
func (s *Server) LookUpPK(ctx context.Context, req *grpcint.LookUpPKRequest) (
*grpcint.LookUpPKResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
var reqPos uint64
if req.GetPos() == nil {
s.epochLock.RLock()
reqPos = s.PublishedPos
s.epochLock.RUnlock()
} else {
reqPos = req.GetPos().GetPos()
}
key, sign, pos, err := s.GetUserKey(ctx, req.GetUsr().GetUsername(), false, reqPos)
if err != nil {
return nil, err
}
return &grpcint.LookUpPKResponse{
Iek: &grpcint.IndexedEK{
Pos: &grpcint.Position{Pos: pos},
PublicKey: &grpcint.EncryptionKey{Ek: key},
},
Signature: sign,
VrfKey: s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
}, nil
}
func (s *Server) LookUpMKVerify(ctx context.Context,
req *grpcint.LookUpMKVerifyRequest) (
*grpcint.LookUpMKVerifyResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
if req.Size == 0 {
s.epochLock.RLock()
req.Size = s.PublishedPos
s.epochLock.RUnlock()
}
key, sign, pos, err := s.GetUserKey(ctx, req.GetUsr().GetUsername(), true, 0)
vrfKey := s.vrfPrivKey.Compute(req.GetUsr().GetUsername())
if err != nil {
return nil, err
}
proof := s.MerkleSquare.ProveFirst(vrfKey, key, uint32(pos), uint32(req.Size))
marshaledProof, err := json.Marshal(proof)
return &grpcint.LookUpMKVerifyResponse{
Imk: &grpcint.IndexedMK{
Pos: &grpcint.Position{Pos: pos},
MasterKey: &grpcint.MasterKey{Mk: key},
},
Signature: sign,
VrfKey: vrfKey,
Proof: marshaledProof,
}, err
}
func (s *Server) LookUpPKVerify(ctx context.Context, req *grpcint.LookUpPKVerifyRequest) (
*grpcint.LookUpPKVerifyResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
if req.Size == 0 {
s.epochLock.RLock()
req.Size = s.PublishedPos
s.epochLock.RUnlock()
}
key, sign, pos, err := s.GetUserKey(ctx, req.GetUsr().GetUsername(), false, req.Size)
vrfKey := s.vrfPrivKey.Compute(req.GetUsr().GetUsername())
if err != nil {
return nil, err
}
proof := s.MerkleSquare.ProveLatest(vrfKey, key, uint32(pos), uint32(req.Size))
marshaledProof, err := json.Marshal(proof)
return &grpcint.LookUpPKVerifyResponse{
Iek: &grpcint.IndexedEK{
Pos: &grpcint.Position{Pos: pos},
PublicKey: &grpcint.EncryptionKey{Ek: key},
},
Signature: sign,
VrfKey: vrfKey,
Proof: marshaledProof,
}, err
}
func (s *Server) GetNewCheckPoint(ctx context.Context,
req *grpcint.GetNewCheckPointRequest) (
*grpcint.GetNewCheckPointResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
if req.OldSize > s.PublishedPos {
return nil, errors.New("Auditor expects more leaves than what server published")
}
digest, err := json.Marshal(s.PublishedDigest)
if err != nil {
return nil, err
}
var marshaledProof []byte
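// Serve the extension proof from the cache when caching is enabled; otherwise
// generate it (and cache it for later auditors requesting the same range).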
proof, ok := s.extensionProofCache[ExtensionProofKey{req.OldSize, s.PublishedPos}]
if s.CacheExtensionProofs && ok {
marshaledProof = proof
} else {
extensionProof := s.MerkleSquare.GenerateExtensionProof(
uint32(req.OldSize), uint32(s.PublishedPos))
marshaledProof, err = json.Marshal(extensionProof)
if err != nil {
return nil, err
}
if s.CacheExtensionProofs {
s.extensionProofCache[ExtensionProofKey{req.OldSize, s.PublishedPos}] = marshaledProof
}
}
return &grpcint.GetNewCheckPointResponse{
CkPoint: &grpcint.CheckPoint{
MarshaledDigest: digest,
NumLeaves: s.PublishedPos,
Epoch: s.epoch,
},
Proof: marshaledProof,
}, nil
}
func (s *Server) GetMasterKeyProof(ctx context.Context,
req *grpcint.GetMasterKeyProofRequest) (
*grpcint.GetMasterKeyProofResponse, error) {
if err := ctx.Err(); err != nil |
proof := s.MerkleSquare.ProveNonexistence(
s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
uint32(req.GetPos().GetPos()), uint32(req.Size))
marshaledProof, err := json.Marshal(proof)
if err != nil {
return nil, err
}
return &grpcint.GetMasterKeyProofResponse{
Proof: marshaledProof,
}, nil
}
func (s *Server) GetPublicKeyProof(ctx context.Context,
req *grpcint.GetPublicKeyProofRequest) (*grpcint.GetPublicKeyProofResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
if req.Size == 0 {
s.epochLock.RLock()
req.Size = s.PublishedPos
s.epochLock.RUnlock()
}
proof := s.MerkleSquare.GenerateExistenceProof(
s.vrfPrivKey.Compute(req.GetUsr().GetUsername()), uint32(req.GetPos().GetPos()),
req.GetHeight(), uint32(req.Size))
marshaledProof, err := json.Marshal(proof)
if err != nil {
return nil, err
}
return &grpcint.GetPublicKeyProofResponse{
Proof: marshaledProof,
}, nil
}
func (s *Server) GetMonitoringProofForTest(ctx context.Context,
req *grpcint.GetMonitoringProofForTestRequest) (
*grpcint.GetMonitoringProofForTestResponse, error) {
if err := ctx.Err(); err != nil {
return nil, err
}
if req.Size == 0 {
s.epochLock.RLock()
req.Size = s.PublishedPos
s.epochLock.RUnlock()
}
proof := s.MerkleSquare.GenerateExistenceProof(
s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
uint32(req.GetPos().GetPos()), req.GetHeight(), uint32(req.Size))
marshaledProof, err := json.Marshal(proof)
if err != nil {
fmt.Println(uint32(req.GetPos().GetPos()))
fmt.Println(req.GetHeight())
fmt.Println(uint32(req.Size))
fmt.Println(err.Error())
return nil, err
}
return &grpcint.GetMonitoringProofForTestResponse{
Proof: marshaledProof,
}, nil
}
func (s *Server) GetLookUpProof(ctx context.Context,
req *grpcint.GetLookUpProofRequest) (
*grpcint.GetLookUpProofResponse, error) {
var err error
if err := ctx.Err(); err != nil {
return nil, err
}
var marshaledProof []byte
if req.GetIsMasterKey() {
proof := s.MerkleSquare.ProveFirst(
s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
req.GetMasterKey().GetMk(), uint32(req.GetPos().GetPos()), uint32(req.Size))
marshaledProof, err = json.Marshal(proof)
} else {
proof := s.MerkleSquare.ProveLatest(
s.vrfPrivKey.Compute(req.GetUsr().GetUsername()),
req.GetEncryptionKey().GetEk(), uint32(req.GetPos().GetPos()), uint32(req.Size))
marshaledProof, err = json.Marshal(proof)
}
if err != nil {
return nil, err
}
return &grpcint.GetLookUpProofResponse{
Proof: marshaledProof,
}, nil
}
| {
return nil, err
} |
watch.py | from logging import getLogger
from pathlib import Path
from typing import List, Optional
from typer import Argument
from deckz.cli import app
from deckz.paths import Paths
from deckz.watching import watch as watching_watch
_logger = getLogger(__name__)
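# Invoked through the deckz CLI; with typer, the boolean parameters below are
# presumably exposed as --handout/--no-handout style flags (the exact
# invocation depends on how the app is mounted).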
@app.command()
def watch(
targets: Optional[List[str]] = Argument(None),
handout: bool = False,
presentation: bool = True,
print: bool = False,
minimum_delay: int = 5, | deck_path: Path = Path("."),
) -> None:
"""Compile on change."""
_logger.info("Watching current and shared directories")
watching_watch(
minimum_delay=minimum_delay,
paths=Paths.from_defaults(deck_path),
build_handout=handout,
build_presentation=presentation,
build_print=print,
target_whitelist=targets,
) | |
mod.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! The PeerManager module is responsible for establishing connections between Peers and for
//! opening/receiving new substreams on those connections.
//!
//! ## Implementation
//!
//! The PeerManager is implemented as a number of actors:
//! * A main event loop actor which is responsible for handling requests and sending
//! notifications about new/lost Peers to the rest of the network stack.
//! * An actor responsible for dialing and listening for new connections.
use crate::{
counters,
interface::{NetworkNotification, NetworkProvider, NetworkRequest},
logging::*,
peer::DisconnectReason,
protocols::{
direct_send::Message,
rpc::{error::RpcError, InboundRpcRequest, OutboundRpcRequest},
},
transport,
transport::{Connection, ConnectionId, ConnectionMetadata},
ProtocolId,
};
use bytes::Bytes;
use channel::{self, libra_channel};
use futures::{
channel::oneshot,
future::{BoxFuture, FutureExt},
io::{AsyncRead, AsyncWrite, AsyncWriteExt},
sink::SinkExt,
stream::{Fuse, FuturesUnordered, StreamExt},
};
use libra_config::network_id::NetworkContext;
use libra_logger::prelude::*;
use libra_network_address::NetworkAddress;
use libra_types::PeerId;
use netcore::transport::{ConnectionOrigin, Transport};
use serde::Serialize;
use std::{
collections::{hash_map::Entry, HashMap},
fmt::Debug,
marker::PhantomData,
sync::Arc,
time::Duration,
};
use tokio::runtime::Handle;
pub mod builder;
pub mod conn_notifs_channel;
mod error;
#[cfg(test)]
mod tests;
pub use self::error::PeerManagerError;
/// Request received by PeerManager from upstream actors.
#[derive(Debug, Serialize)]
pub enum PeerManagerRequest {
/// Send an RPC request to a remote peer.
SendRpc(PeerId, OutboundRpcRequest),
/// Fire-and-forget style message send to a remote peer.
SendMessage(PeerId, Message),
}
/// Notifications sent by PeerManager to upstream actors.
#[derive(Debug)]
pub enum PeerManagerNotification {
/// A new RPC request has been received from a remote peer.
RecvRpc(PeerId, InboundRpcRequest),
/// A new message has been received from a remote peer.
RecvMessage(PeerId, Message),
}
#[derive(Debug, Serialize)]
pub enum ConnectionRequest {
DialPeer(
PeerId,
NetworkAddress,
#[serde(skip)] oneshot::Sender<Result<(), PeerManagerError>>,
),
DisconnectPeer(
PeerId,
#[serde(skip)] oneshot::Sender<Result<(), PeerManagerError>>,
),
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
pub enum ConnectionNotification {
/// Connection with a new peer has been established.
NewPeer(
PeerId,
NetworkAddress,
ConnectionOrigin,
Arc<NetworkContext>,
),
/// Connection to a peer has been terminated. This could have been triggered from either end.
LostPeer(PeerId, NetworkAddress, ConnectionOrigin, DisconnectReason),
}
/// Convenience wrapper which makes it easy to issue communication requests and await the responses
/// from PeerManager.
#[derive(Clone)]
pub struct PeerManagerRequestSender {
inner: libra_channel::Sender<(PeerId, ProtocolId), PeerManagerRequest>,
}
/// Convenience wrapper which makes it easy to issue connection requests and await the responses
/// from PeerManager.
#[derive(Clone)]
pub struct ConnectionRequestSender {
inner: libra_channel::Sender<PeerId, ConnectionRequest>,
}
impl PeerManagerRequestSender {
/// Construct a new PeerManagerRequestSender with a raw channel::Sender
pub fn new(inner: libra_channel::Sender<(PeerId, ProtocolId), PeerManagerRequest>) -> Self {
Self { inner }
}
/// Send a fire-and-forget direct-send message to remote peer.
///
/// The function returns when the message has been enqueued on the network actor's event queue.
/// It therefore makes no reliable delivery guarantees. An error is returned if the event queue
/// is unexpectedly shutdown.
pub fn send_to(
&mut self,
peer_id: PeerId,
protocol: ProtocolId,
mdata: Bytes,
) -> Result<(), PeerManagerError> {
self.inner.push(
(peer_id, protocol),
PeerManagerRequest::SendMessage(peer_id, Message { protocol, mdata }),
)?;
Ok(())
}
/// Send the _same_ message to many recipients using the direct-send protocol.
///
/// This method is an optimization so that we can avoid serializing and
/// copying the same message many times when we want to send a single message
/// to many peers. Note that the `Bytes` the message is serialized into is a
/// ref-counted byte buffer, so we can avoid excess copies as all direct-sends
/// will share the same underlying byte buffer.
///
/// The function returns when all send requests have been enqueued on the network
/// actor's event queue. It therefore makes no reliable delivery guarantees.
/// An error is returned if the event queue is unexpectedly shutdown.
pub fn send_to_many(
&mut self,
recipients: impl Iterator<Item = PeerId>,
protocol: ProtocolId,
mdata: Bytes,
) -> Result<(), PeerManagerError> {
let msg = Message { protocol, mdata };
for recipient in recipients {
// We return `Err` early here if the send fails. Since sending will
// only fail if the queue is unexpectedly shutdown (i.e., receiver
// dropped early), we know that we can't make further progress if
// this send fails.
self.inner.push(
(recipient, protocol),
PeerManagerRequest::SendMessage(recipient, msg.clone()),
)?;
}
Ok(())
}
/// Sends a unary RPC to a remote peer and waits to either receive a response or times out.
pub async fn send_rpc(
&mut self,
peer_id: PeerId,
protocol: ProtocolId,
req: Bytes,
timeout: Duration,
) -> Result<Bytes, RpcError> {
let (res_tx, res_rx) = oneshot::channel();
let request = OutboundRpcRequest {
protocol,
data: req,
res_tx,
timeout,
};
self.inner.push(
(peer_id, protocol),
PeerManagerRequest::SendRpc(peer_id, request),
)?;
res_rx.await?
}
}
impl ConnectionRequestSender {
/// Construct a new ConnectionRequestSender with a raw libra_channel::Sender
pub fn new(inner: libra_channel::Sender<PeerId, ConnectionRequest>) -> Self {
Self { inner }
}
pub async fn dial_peer(
&mut self,
peer: PeerId,
addr: NetworkAddress,
) -> Result<(), PeerManagerError> {
let (oneshot_tx, oneshot_rx) = oneshot::channel();
self.inner
.push(peer, ConnectionRequest::DialPeer(peer, addr, oneshot_tx))?;
oneshot_rx.await?
}
pub async fn disconnect_peer(&mut self, peer: PeerId) -> Result<(), PeerManagerError> {
let (oneshot_tx, oneshot_rx) = oneshot::channel();
self.inner
.push(peer, ConnectionRequest::DisconnectPeer(peer, oneshot_tx))?;
oneshot_rx.await?
}
}
/// Responsible for handling and maintaining connections to other Peers
pub struct PeerManager<TTransport, TSocket>
where
TTransport: Transport,
TSocket: AsyncRead + AsyncWrite,
{
network_context: Arc<NetworkContext>,
/// A handle to a tokio executor.
executor: Handle,
/// Address to listen on for incoming connections.
listen_addr: NetworkAddress,
/// Connection Listener, listening on `listen_addr`
transport_handler: Option<TransportHandler<TTransport, TSocket>>,
/// Map from PeerId to corresponding Peer object.
active_peers: HashMap<
PeerId,
(
ConnectionMetadata,
libra_channel::Sender<ProtocolId, NetworkRequest>,
),
>,
/// Channel to receive requests from other actors.
requests_rx: libra_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>,
/// Upstream handlers for RPC and DirectSend protocols. The handlers are promised fair delivery
/// of messages across (PeerId, ProtocolId).
upstream_handlers:
HashMap<ProtocolId, libra_channel::Sender<(PeerId, ProtocolId), PeerManagerNotification>>,
/// Channels to send NewPeer/LostPeer notifications to.
connection_event_handlers: Vec<conn_notifs_channel::Sender>,
/// Channel used to send Dial requests to the ConnectionHandler actor
transport_reqs_tx: channel::Sender<TransportRequest>,
/// Sender for connection events.
transport_notifs_tx: channel::Sender<TransportNotification<TSocket>>,
/// Receiver for connection requests.
connection_reqs_rx: libra_channel::Receiver<PeerId, ConnectionRequest>,
/// Receiver for connection events.
transport_notifs_rx: channel::Receiver<TransportNotification<TSocket>>,
/// A map of outstanding disconnect requests.
outstanding_disconnect_requests:
HashMap<ConnectionId, oneshot::Sender<Result<(), PeerManagerError>>>,
/// Pin the transport type corresponding to this PeerManager instance
phantom_transport: PhantomData<TTransport>,
/// Maximum concurrent network requests to any peer.
max_concurrent_network_reqs: usize,
/// Maximum concurrent network notifications processed for a peer.
max_concurrent_network_notifs: usize,
/// Size of channels between different actors.
channel_size: usize,
/// Max network frame size
max_frame_size: usize,
}
impl<TTransport, TSocket> PeerManager<TTransport, TSocket>
where
TTransport: Transport<Output = Connection<TSocket>> + Send + 'static,
TSocket: transport::TSocket,
{
/// Construct a new PeerManager actor
#[allow(clippy::too_many_arguments)]
pub fn new(
executor: Handle,
transport: TTransport,
network_context: Arc<NetworkContext>,
listen_addr: NetworkAddress,
requests_rx: libra_channel::Receiver<(PeerId, ProtocolId), PeerManagerRequest>,
connection_reqs_rx: libra_channel::Receiver<PeerId, ConnectionRequest>,
upstream_handlers: HashMap<
ProtocolId,
libra_channel::Sender<(PeerId, ProtocolId), PeerManagerNotification>,
>,
connection_event_handlers: Vec<conn_notifs_channel::Sender>,
channel_size: usize,
max_concurrent_network_reqs: usize,
max_concurrent_network_notifs: usize,
max_frame_size: usize,
) -> Self {
let (transport_notifs_tx, transport_notifs_rx) = channel::new(
channel_size,
&counters::PENDING_CONNECTION_HANDLER_NOTIFICATIONS,
);
let (transport_reqs_tx, transport_reqs_rx) =
channel::new(channel_size, &counters::PENDING_PEER_MANAGER_DIAL_REQUESTS);
//TODO now that you can only listen on a socket inside of a tokio runtime we'll need to
// rethink how we init the PeerManager so we don't have to do this funny thing.
let transport_notifs_tx_clone = transport_notifs_tx.clone();
let (transport_handler, listen_addr) = executor.enter(|| {
TransportHandler::new(
network_context.clone(),
transport,
listen_addr,
transport_reqs_rx,
transport_notifs_tx_clone,
)
});
Self {
network_context,
executor,
listen_addr,
transport_handler: Some(transport_handler),
active_peers: HashMap::new(),
requests_rx,
connection_reqs_rx,
transport_reqs_tx,
transport_notifs_tx,
transport_notifs_rx,
outstanding_disconnect_requests: HashMap::new(),
phantom_transport: PhantomData,
upstream_handlers,
connection_event_handlers,
max_concurrent_network_reqs,
max_concurrent_network_notifs,
channel_size,
max_frame_size,
}
}
/// Get the [`NetworkAddress`] we're listening for incoming connections on
pub fn listen_addr(&self) -> &NetworkAddress {
&self.listen_addr
}
/// Start listening on the set address and return a future which runs PeerManager
pub async fn start(mut self) {
// Start listening for connections.
send_struct_log!(
network_log(network_events::PEER_MANAGER_LOOP, &self.network_context)
.data(network_events::TYPE, network_events::START)
);
self.start_connection_listener();
loop {
::futures::select! {
connection_event = self.transport_notifs_rx.select_next_some() => {
send_struct_log!(network_log(network_events::PEER_MANAGER_LOOP, &self.network_context)
.data(network_events::TYPE, "connection_event")
.data(network_events::EVENT, &connection_event)
);
self.handle_connection_event(connection_event);
}
request = self.requests_rx.select_next_some() => {
send_struct_log!(network_log(network_events::PEER_MANAGER_LOOP, &self.network_context)
.data(network_events::TYPE, "request")
.field(network_events::PEER_MANAGER_REQUEST, &request)
);
self.handle_request(request).await;
}
connection_request = self.connection_reqs_rx.select_next_some() => {
send_struct_log!(network_log(network_events::PEER_MANAGER_LOOP, &self.network_context)
.data(network_events::TYPE, "connection_request")
.field(network_events::CONNECTION_REQUEST, &connection_request)
);
self.handle_connection_request(connection_request).await;
}
complete => {
// TODO: This should be ok when running in client mode.
send_struct_log!(network_log(network_events::PEER_MANAGER_LOOP, &self.network_context)
.data(network_events::TYPE, network_events::TERMINATION)
.critical()
);
crit!("{} Peer manager actor terminated", self.network_context);
break;
}
}
}
}
fn handle_connection_event(&mut self, event: TransportNotification<TSocket>) {
trace!(
"{} TransportNotification::{:?}",
self.network_context,
event
);
match event {
TransportNotification::NewConnection(conn) => {
info!(
"{} New connection established: {:?}",
self.network_context, conn,
);
// Update libra_network_peer counter.
self.add_peer(conn);
counters::LIBRA_NETWORK_PEERS
.with_label_values(&[self.network_context.role().as_str(), "connected"])
.set(self.active_peers.len() as i64);
}
TransportNotification::Disconnected(lost_conn_metadata, reason) => {
// See: https://github.com/libra/libra/issues/3128#issuecomment-605351504 for
// detailed reasoning on how `Disconnected` events should be handled correctly.
info!(
"{} Connection {:?} closed due to {:?}",
self.network_context, lost_conn_metadata, reason,
);
let peer_id = lost_conn_metadata.peer_id();
// If the active connection with the peer is lost, remove it from `active_peers`.
if let Entry::Occupied(entry) = self.active_peers.entry(peer_id) {
let (conn_metadata, _) = entry.get();
if conn_metadata.connection_id() == lost_conn_metadata.connection_id() {
// We lost an active connection.
entry.remove();
}
}
counters::LIBRA_NETWORK_PEERS
.with_label_values(&[self.network_context.role().as_str(), "connected"])
.set(self.active_peers.len() as i64);
// If the connection was explicitly closed by an upstream client, send an ACK.
if let Some(oneshot_tx) = self
.outstanding_disconnect_requests
.remove(&lost_conn_metadata.connection_id())
{
// The client explicitly closed the connection and it should be notified.
if let Err(send_err) = oneshot_tx.send(Ok(())) {
info!(
"{} Failed to send connection close error. Error: {:?}",
self.network_context, send_err
);
}
}
// Notify upstream if there's still no active connection. This might be redundant,
// but does not affect correctness.
if !self.active_peers.contains_key(&peer_id) {
let notif = ConnectionNotification::LostPeer(
peer_id,
lost_conn_metadata.addr().clone(),
lost_conn_metadata.origin(),
reason,
);
self.send_conn_notification(peer_id, notif);
}
}
}
}
async fn handle_connection_request(&mut self, request: ConnectionRequest) {
trace!("{} PeerManagerRequest::{:?}", self.network_context, request);
match request {
ConnectionRequest::DialPeer(requested_peer_id, addr, response_tx) => {
// Only dial peers which we aren't already connected with
if let Some((curr_connection, _)) = self.active_peers.get(&requested_peer_id) {
let error = PeerManagerError::AlreadyConnected(curr_connection.addr().clone());
debug!(
"{} Already connected with Peer {} using connection {:?}. Not dialing address {}",
self.network_context,
requested_peer_id.short_str(),
curr_connection,
addr
);
if response_tx.send(Err(error)).is_err() {
warn!(
"{} Receiver for DialPeer {} dropped",
self.network_context,
requested_peer_id.short_str()
);
}
} else {
self.dial_peer(requested_peer_id, addr, response_tx).await;
};
}
ConnectionRequest::DisconnectPeer(peer_id, resp_tx) => {
// Send a CloseConnection request to NetworkProvider and drop the send end of the
// NetworkRequest channel.
if let Some((conn_metadata, sender)) = self.active_peers.remove(&peer_id) {
// This should trigger a disconnect.
drop(sender);
// Add to outstanding disconnect requests.
self.outstanding_disconnect_requests
.insert(conn_metadata.connection_id(), resp_tx);
} else {
info!(
"{} Connection with peer: {} is already closed",
self.network_context,
peer_id.short_str(),
);
if let Err(err) = resp_tx.send(Err(PeerManagerError::NotConnected(peer_id))) {
info!(
"{} Failed to indicate that connection is already closed. Error: {:?}",
self.network_context, err
);
}
}
}
}
}
async fn handle_request(&mut self, request: PeerManagerRequest) {
trace!("{} PeerManagerRequest::{:?}", self.network_context, request);
match request {
PeerManagerRequest::SendMessage(peer_id, msg) => {
if let Some((_, sender)) = self.active_peers.get_mut(&peer_id) {
if let Err(err) = sender.push(msg.protocol, NetworkRequest::SendMessage(msg)) {
info!(
"{} Failed to forward outbound message to downstream actor. Error:
{:?}",
self.network_context, err
);
}
} else {
warn!(
"{} Peer {} is not connected",
self.network_context,
peer_id.short_str()
);
}
}
PeerManagerRequest::SendRpc(peer_id, req) => {
if let Some((_, sender)) = self.active_peers.get_mut(&peer_id) {
if let Err(err) = sender.push(req.protocol, NetworkRequest::SendRpc(req)) {
info!(
"{} Failed to forward outbound rpc to downstream actor. Error:
{:?}",
self.network_context, err
);
}
} else {
warn!(
"{} Peer {} is not connected",
self.network_context,
peer_id.short_str()
);
}
}
}
}
fn start_connection_listener(&mut self) {
let transport_handler = self
.transport_handler
.take()
.expect("Transport handler already taken");
self.executor.spawn(transport_handler.listen());
}
/// In the event two peers simultaneously dial each other we need to be able to do
/// tie-breaking to determine which connection to keep and which to drop in a deterministic
/// way. One simple way is to compare our local PeerId with that of the remote's PeerId and
/// keep the connection where the peer with the greater PeerId is the dialer.
///
/// Returns `true` if the existing connection should be dropped and `false` if the new
/// connection should be dropped.
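/// For example, with an existing Inbound connection and a new Outbound one,
/// the new connection wins only when our own PeerId is greater than the
/// remote's; the mirrored (Outbound, Inbound) arm applies the same rule from
/// the other side.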
fn simultaneous_dial_tie_breaking(
own_peer_id: PeerId,
remote_peer_id: PeerId,
existing_origin: ConnectionOrigin,
new_origin: ConnectionOrigin,
) -> bool {
match (existing_origin, new_origin) {
// If the remote dials while an existing connection is open, the older connection is
// dropped.
(ConnectionOrigin::Inbound, ConnectionOrigin::Inbound) => true,
// We should never dial the same peer twice, but if we do drop the old connection
(ConnectionOrigin::Outbound, ConnectionOrigin::Outbound) => true,
(ConnectionOrigin::Inbound, ConnectionOrigin::Outbound) => remote_peer_id < own_peer_id,
(ConnectionOrigin::Outbound, ConnectionOrigin::Inbound) => own_peer_id < remote_peer_id,
}
}
fn add_peer(&mut self, connection: Connection<TSocket>) {
let conn_meta = connection.metadata.clone();
let peer_id = conn_meta.peer_id();
assert_ne!(self.network_context.peer_id(), peer_id);
let mut send_new_peer_notification = true;
// Check for and handle simultaneous dialing
if let Entry::Occupied(active_entry) = self.active_peers.entry(peer_id) {
let (curr_conn_metadata, _) = active_entry.get();
if Self::simultaneous_dial_tie_breaking(
self.network_context.peer_id(),
peer_id,
curr_conn_metadata.origin(),
conn_meta.origin(),
) {
let (_, peer_handle) = active_entry.remove();
// Drop the existing connection and replace it with the new connection
drop(peer_handle);
info!(
"{} Closing existing connection with Peer {} to mitigate simultaneous dial",
self.network_context,
peer_id.short_str()
);
send_new_peer_notification = false;
} else {
info!(
"{} Closing incoming connection with Peer {} to mitigate simultaneous dial",
self.network_context,
peer_id.short_str()
);
let network_context = self.network_context.clone();
// Drop the new connection and keep the one already stored in active_peers
let drop_fut = async move {
let mut connection = connection;
if let Err(e) = tokio::time::timeout(
transport::TRANSPORT_TIMEOUT,
connection.socket.close(),
)
.await
{
error!(
"{} Closing connection with Peer {} failed with error: {}",
network_context,
peer_id.short_str(),
e
);
};
};
self.executor.spawn(drop_fut);
return;
}
}
// Initialize a new network stack for this connection.
let (network_reqs_tx, network_notifs_rx) = NetworkProvider::start(
self.executor.clone(),
connection,
self.transport_notifs_tx.clone(),
self.max_concurrent_network_reqs,
self.max_concurrent_network_notifs,
self.channel_size,
self.max_frame_size,
);
// Start background task to handle events (RPCs and DirectSend messages) received from
// peer.
self.spawn_peer_network_events_handler(peer_id, network_notifs_rx);
// Save NetworkRequest sender to `active_peers`.
self.active_peers
.insert(peer_id, (conn_meta.clone(), network_reqs_tx));
// Send NewPeer notification to connection event handlers.
if send_new_peer_notification {
let notif = ConnectionNotification::NewPeer(
peer_id,
conn_meta.addr().clone(),
conn_meta.origin(),
self.network_context.clone(),
);
self.send_conn_notification(peer_id, notif);
}
}
/// Sends a `ConnectionNotification` to all event handlers, warns on failures
fn | (&mut self, peer_id: PeerId, notification: ConnectionNotification) {
for handler in self.connection_event_handlers.iter_mut() {
if let Err(e) = handler.push(peer_id, notification.clone()) {
warn!(
"{} Failed to send notification {:?} to handler for peer: {}. Error: {:?}",
self.network_context,
notification,
peer_id.short_str(),
e
);
}
}
}
async fn dial_peer(
&mut self,
peer_id: PeerId,
address: NetworkAddress,
response_tx: oneshot::Sender<Result<(), PeerManagerError>>,
) {
let request = TransportRequest::DialPeer(peer_id, address, response_tx);
self.transport_reqs_tx.send(request).await.unwrap();
}
fn spawn_peer_network_events_handler(
&self,
peer_id: PeerId,
network_events: libra_channel::Receiver<ProtocolId, NetworkNotification>,
) {
let mut upstream_handlers = self.upstream_handlers.clone();
let network_context = self.network_context.clone();
self.executor.spawn(network_events.for_each_concurrent(
self.max_concurrent_network_reqs,
move |inbound_event| {
Self::handle_inbound_event(
network_context.clone(),
inbound_event,
peer_id,
&mut upstream_handlers,
);
futures::future::ready(())
},
));
}
fn handle_inbound_event(
network_context: Arc<NetworkContext>,
inbound_event: NetworkNotification,
peer_id: PeerId,
upstream_handlers: &mut HashMap<
ProtocolId,
libra_channel::Sender<(PeerId, ProtocolId), PeerManagerNotification>,
>,
) {
match inbound_event {
NetworkNotification::RecvMessage(msg) => {
let protocol = msg.protocol;
if let Some(handler) = upstream_handlers.get_mut(&protocol) {
// Send over libra channel for fairness.
if let Err(err) = handler.push(
(peer_id, protocol),
PeerManagerNotification::RecvMessage(peer_id, msg),
) {
warn!(
"{} Upstream handler unable to handle messages for protocol: {:?}. Error:
{:?}",
network_context,
protocol, err
);
}
} else {
unreachable!(
"{} Received network event for unregistered protocol",
network_context
);
}
}
NetworkNotification::RecvRpc(rpc_req) => {
let protocol = rpc_req.protocol;
if let Some(handler) = upstream_handlers.get_mut(&protocol) {
// Send over libra channel for fairness.
if let Err(err) = handler.push(
(peer_id, protocol),
PeerManagerNotification::RecvRpc(peer_id, rpc_req),
) {
warn!(
"{} Upstream handler unable to handle rpc for protocol: {:?}. Error:
{:?}",
network_context, protocol, err
);
}
} else {
unreachable!(
"{} Received network event for unregistered protocol",
network_context
);
}
}
}
}
}
#[derive(Debug)]
enum TransportRequest {
DialPeer(
PeerId,
NetworkAddress,
oneshot::Sender<Result<(), PeerManagerError>>,
),
}
#[derive(Debug, Serialize)]
pub enum TransportNotification<TSocket>
where
TSocket: AsyncRead + AsyncWrite,
{
NewConnection(#[serde(skip)] Connection<TSocket>),
Disconnected(ConnectionMetadata, DisconnectReason),
}
/// Responsible for listening for new incoming connections
struct TransportHandler<TTransport, TSocket>
where
TTransport: Transport,
TSocket: AsyncRead + AsyncWrite,
{
network_context: Arc<NetworkContext>,
/// [`Transport`] that is used to establish connections
transport: TTransport,
listener: Fuse<TTransport::Listener>,
transport_reqs_rx: channel::Receiver<TransportRequest>,
transport_notifs_tx: channel::Sender<TransportNotification<TSocket>>,
}
impl<TTransport, TSocket> TransportHandler<TTransport, TSocket>
where
TTransport: Transport<Output = Connection<TSocket>>,
TTransport::Listener: 'static,
TTransport::Inbound: 'static,
TTransport::Outbound: 'static,
TSocket: AsyncRead + AsyncWrite + 'static,
{
fn new(
network_context: Arc<NetworkContext>,
transport: TTransport,
listen_addr: NetworkAddress,
transport_reqs_rx: channel::Receiver<TransportRequest>,
transport_notifs_tx: channel::Sender<TransportNotification<TSocket>>,
) -> (Self, NetworkAddress) {
let (listener, listen_addr) = transport
.listen_on(listen_addr)
.expect("Transport listen on fails");
debug!("{} listening on {:?}", network_context, listen_addr);
(
Self {
network_context,
transport,
listener: listener.fuse(),
transport_reqs_rx,
transport_notifs_tx,
},
listen_addr,
)
}
async fn listen(mut self) {
let mut pending_inbound_connections = FuturesUnordered::new();
let mut pending_outbound_connections = FuturesUnordered::new();
debug!(
"{} Incoming connections listener Task started",
self.network_context
);
loop {
futures::select! {
dial_request = self.transport_reqs_rx.select_next_some() => {
if let Some(fut) = self.dial_peer(dial_request) {
pending_outbound_connections.push(fut);
}
},
incoming_connection = self.listener.select_next_some() => {
match incoming_connection {
Ok((upgrade, addr)) => {
debug!("{} Incoming connection from {}", self.network_context, addr);
pending_inbound_connections.push(upgrade.map(|out| (out, addr)));
}
Err(e) => {
warn!("{} Incoming connection error {}", self.network_context, e);
}
}
},
(upgrade, addr, peer_id, response_tx) = pending_outbound_connections.select_next_some() => {
self.handle_completed_outbound_upgrade(upgrade, addr, peer_id, response_tx).await;
},
(upgrade, addr) = pending_inbound_connections.select_next_some() => {
self.handle_completed_inbound_upgrade(upgrade, addr).await;
},
complete => break,
}
}
error!(
"{} Incoming connections listener Task ended",
self.network_context
);
}
fn dial_peer(
&self,
dial_peer_request: TransportRequest,
) -> Option<
BoxFuture<
'static,
(
Result<Connection<TSocket>, TTransport::Error>,
NetworkAddress,
PeerId,
oneshot::Sender<Result<(), PeerManagerError>>,
),
>,
> {
match dial_peer_request {
TransportRequest::DialPeer(peer_id, addr, response_tx) => {
match self.transport.dial(peer_id, addr.clone()) {
Ok(upgrade) => Some(
upgrade
.map(move |out| (out, addr, peer_id, response_tx))
.boxed(),
),
Err(error) => {
if response_tx
.send(Err(PeerManagerError::from_transport_error(error)))
.is_err()
{
warn!(
"{} Receiver for DialPeer {} request dropped",
self.network_context,
peer_id.short_str()
);
}
None
}
}
}
}
}
async fn handle_completed_outbound_upgrade(
&mut self,
upgrade: Result<Connection<TSocket>, TTransport::Error>,
addr: NetworkAddress,
peer_id: PeerId,
response_tx: oneshot::Sender<Result<(), PeerManagerError>>,
) {
match upgrade {
Ok(connection) => {
let dialed_peer_id = connection.metadata.peer_id();
let response = if dialed_peer_id == peer_id {
debug!(
"{} Peer '{}' successfully dialed at '{}'",
self.network_context,
peer_id.short_str(),
addr
);
let event = TransportNotification::NewConnection(connection);
// Send the new connection to PeerManager
self.transport_notifs_tx.send(event).await.unwrap();
Ok(())
} else {
let e = ::anyhow::format_err!(
"Dialed PeerId ({}) differs from expected PeerId ({})",
dialed_peer_id.short_str(),
peer_id.short_str()
);
warn!("{} {}", self.network_context, e);
Err(PeerManagerError::from_transport_error(e))
};
if response_tx.send(response).is_err() {
warn!(
"{} Receiver for DialPeer {} request dropped",
self.network_context,
peer_id.short_str()
);
}
}
Err(error) => {
error!(
"{} Error dialing Peer {} at {}",
self.network_context,
peer_id.short_str(),
addr
);
if response_tx
.send(Err(PeerManagerError::from_transport_error(error)))
.is_err()
{
warn!(
"{} Receiver for DialPeer {} request dropped",
self.network_context,
peer_id.short_str()
);
}
}
}
}
async fn handle_completed_inbound_upgrade(
&mut self,
upgrade: Result<Connection<TSocket>, TTransport::Error>,
addr: NetworkAddress,
) {
match upgrade {
Ok(connection) => {
debug!(
"{} Connection from {} at {} successfully upgraded",
self.network_context,
connection.metadata.peer_id().short_str(),
addr
);
let event = TransportNotification::NewConnection(connection);
// Send the new connection to PeerManager
self.transport_notifs_tx.send(event).await.unwrap();
}
Err(e) => {
warn!(
"{} Connection from {} failed to upgrade {}",
self.network_context, addr, e
);
}
}
}
}
| send_conn_notification |
ChatterBot.py | # Dependencies
import tweepy
import time
import json
from config import consumer_key, consumer_secret, access_token, access_token_secret
# Twitter API keys (imported from config above)
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# Create a function that tweets
def TweetOut(tweet_number):
|
# Call the TweetOut function every minute, tracking tweets with a counter
counter = 0
# Infinite loop
while(True):
    # Call the TweetOut function and specify the tweet number
TweetOut(counter)
# Once tweeted, wait 60 seconds before doing anything else
time.sleep(60)
# Add 1 to the counter prior to re-running the loop
counter = counter + 1 | api.update_status(
"Can't stop. Won't stop. Chatting! This is Tweet #%s!" %
tweet_number) |
server_test.go | package test
import (
"context"
"fmt"
"strings"
"testing"
"time"
"github.com/grafana/iot-sitewise-datasource/pkg/server"
"github.com/grafana/grafana-plugin-sdk-go/experimental"
"github.com/grafana/grafana-plugin-sdk-go/backend"
"github.com/grafana/iot-sitewise-datasource/pkg/sitewise"
"github.com/grafana/iot-sitewise-datasource/pkg/sitewise/client"
"github.com/grafana/iot-sitewise-datasource/pkg/sitewise/client/mocks"
)
var (
timeRange = backend.TimeRange{
From: time.Now().Add(time.Hour * -3),
To: time.Now(),
}
)
type testServerScenarioFn func(t *testing.T) *testScenario
type testScenario struct {
name string
queries []backend.DataQuery
mockSw *mocks.SitewiseClient
goldenFileName string
handlerFn func(srvr *server.Server) backend.QueryDataHandlerFunc
validationFn func(t *testing.T, dr *backend.QueryDataResponse)
}
func (ts *testScenario) run(t *testing.T) {
runTestScenario(t, ts)
}
// `go test` runs the test binary with the package source directory as the
// working directory, so the test data directory is found via a relative path.
func testDataRelativePath(filename string) string |
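// For example (file name illustrative), testDataRelativePath("foo.golden.txt")
// resolves to "../../testdata/foo.golden.txt".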
func mockedDatasource(swmock *mocks.SitewiseClient) server.Datasource {
return &sitewise.Datasource{
GetClient: func(region string) (client client.SitewiseClient, err error) {
client = swmock
return
},
}
}
func runTestScenario(t *testing.T, scenario *testScenario) {
t.Run(scenario.name, func(t *testing.T) {
ctx := context.Background()
req := &backend.QueryDataRequest{
PluginContext: backend.PluginContext{},
Queries: scenario.queries,
}
srvr := &server.Server{
Datasource: mockedDatasource(scenario.mockSw),
}
qdr, err := scenario.handlerFn(srvr)(ctx, req)
// this should always be nil, as the error is wrapped in the QueryDataResponse
if err != nil {
t.Fatal(err)
}
if scenario.validationFn != nil {
scenario.validationFn(t, qdr)
}
// write out the golden for all data responses
for i, dr := range qdr.Responses {
fname := fmt.Sprintf(testDataRelativePath("%s-%s.golden.txt"), scenario.goldenFileName, i)
// temporary fix for golden files https://github.com/grafana/grafana-plugin-sdk-go/issues/213
for _, fr := range dr.Frames {
if fr.Meta != nil {
fr.Meta.Custom = nil
}
}
if err := experimental.CheckGoldenDataResponse(fname, &dr, true); err != nil {
if !strings.Contains(err.Error(), "no such file or directory") {
t.Fatal(err)
}
}
}
})
}
| {
return "../../testdata/" + filename
} |
UserNotificationsList.tsx | import { Notification } from 'linode-js-sdk/lib/account';
import { compose, path } from 'ramda';
import * as React from 'react';
import { RouteComponentProps, withRouter } from 'react-router';
import {
createStyles,
Theme,
withStyles,
WithStyles
} from 'src/components/core/styles';
import Typography from 'src/components/core/Typography';
import { dcDisplayNames } from 'src/constants';
import { reportException } from 'src/exceptionReporting';
import UserNotificationListItem from './UserNotificationListItem';
type ClassNames = 'emptyText';
const styles = (theme: Theme) =>
createStyles({
emptyText: {
padding: `${theme.spacing(2)}px ${theme.spacing(3)}px`,
fontFamily: theme.font.bold
}
});
interface Props {
notifications: Notification[];
closeMenu: () => void;
}
type CombinedProps = Props & RouteComponentProps<void> & WithStyles<ClassNames>;
class UserNotificationsList extends React.Component<CombinedProps, {}> {
render() {
const {
classes,
notifications,
closeMenu,
history: { push }
} = this.props;
if (notifications.length === 0) {
return (
<Typography className={classes.emptyText}>
You have no notifications.
</Typography>
);
}
return (notifications || []).map((notification, idx) => {
const interceptedNotification = interceptNotification(notification);
const onClick = createClickHandlerForNotification(
interceptedNotification,
(targetPath: string) => {
closeMenu();
push(targetPath);
}
);
return React.createElement(UserNotificationListItem, {
key: idx,
label: interceptedNotification.label,
message: interceptedNotification.message,
severity: interceptedNotification.severity,
onClick
});
});
} |
const interceptNotification = (notification: Notification): Notification => {
/** this is an outage to one of the datacenters */
if (
notification.type === 'outage' &&
notification.entity &&
notification.entity.type === 'region'
) {
const convertedRegion = dcDisplayNames[notification.entity.id];
if (!convertedRegion) {
reportException(
'Could not find the DC name for the outage notification',
{
rawRegion: notification.entity.id,
convertedRegion
}
);
}
/** replace "this facility" with the name of the datacenter */
return {
...notification,
label: notification.label
.toLowerCase()
.replace('this facility', convertedRegion || 'one of our facilities'),
message: notification.message
.toLowerCase()
.replace('this facility', convertedRegion || 'one of our facilities')
};
}
/** there is maintenance on this Linode */
if (
notification.type === 'maintenance' &&
notification.entity &&
notification.entity.type === 'linode'
) {
/** replace "this Linode" with the name of the Linode */
const linodeAttachedToNotification = path(['label'], notification.entity);
return {
...notification,
label: `Maintenance Scheduled`,
severity: 'major',
message: `${
linodeAttachedToNotification
? `Linode ${linodeAttachedToNotification}`
: `This Linode`
}
has scheduled maintenance`
};
}
return notification;
};
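// A sketch of the rewrites above (values illustrative): an outage notification
// with entity { type: 'region', id: 'us-east' } has "this facility" in its
// lowercased label and message replaced by dcDisplayNames['us-east'] (or a
// generic fallback), and a Linode maintenance notification is relabeled
// "Maintenance Scheduled" with the Linode's label spliced into the message.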
const createClickHandlerForNotification = (
notification: Notification,
onClick: (path: string) => void
) => {
/**
* Privacy policy changes can only be made in CF manager for now, so we have to
* link externally.
*/
if (
notification.type === 'notice' &&
notification.label === `We've updated our policies.`
) {
return (e: React.MouseEvent<HTMLElement>) => {
window.location.href = `https://manager.linode.com/account/policy`;
};
}
const type = path<string>(['entity', 'type'], notification);
const id = path<number>(['entity', 'id'], notification);
if (!type || !id) {
return;
}
switch (type) {
case 'linode':
return (e: React.MouseEvent<HTMLElement>) => onClick(`/linodes/${id}`);
case 'ticket':
return (e: React.MouseEvent<HTMLElement>) =>
onClick(`/support/tickets/${id}`);
default:
return;
}
};
const styled = withStyles(styles);
const enhanced = compose<any, any, any>(
styled,
withRouter
);
export default enhanced(UserNotificationsList); | } |
index.js | const numbersMapping = {
0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine",
10: "ten", 11: "eleven", 12: "twelve", 13: "thirteen",
14: "fourteen", 15: "fifteen", 16: "sixteen",
17: "seventeen", 18: "eighteen", 19: "nineteen",
20: "twenty", 30: "thirty", 40: "forty", 50: "fifty",
60: "sixty", 70: "seventy", 80: "eighty", 90: "ninety",
100: "hundred"
};
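// Worked example of the decomposition below: toReadable(342) pushes "three"
// and "hundred" for the hundreds, "forty" for the tens, and "two" for the
// remainder, yielding "three hundred forty two".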
module.exports = function toReadable(number){
const maxDigitsNumber = 100;
    if (number < maxDigitsNumber && numbersMapping[number] !== undefined) {
        return numbersMapping[number];
    }
let result = [];
if (number >= maxDigitsNumber){
result.push(numbersMapping[Math.floor((number - number % maxDigitsNumber) / maxDigitsNumber)]);
result.push(numbersMapping[maxDigitsNumber]);
}
number %= maxDigitsNumber;
if (number > 20){
result.push(numbersMapping[number - number % 10]);
number %= 10;
}
| if (number){
result.push(numbersMapping[number]);
}
return result.join(" ");
} | |
string.d.ts | import { StringDistribution } from "../types";
/**
* Returns a distribution that returns a random string using numbers,
* uppercase and lowercase letters, `_`, and `-` of length `length`.
| /**
* Returns a distribution that returns a random string using the provided
* string pool as the possible characters to choose from of length `length`.
* @param length Length of the result string
*/
export declare function string(pool: string): StringDistribution; | * @param length Length of the result string
*/
export declare function string(): StringDistribution;
|
Object.setPath.ts | // Copyright 2020 UBIO Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import jsonPointer from 'jsonpointer';
import { RuntimeCtx } from '../ctx';
import { Element } from '../element';
import { params } from '../model';
import { Pipe } from '../pipe';
import { Pipeline } from '../pipeline';
import * as util from '../util';
export class | extends Pipe {
static $type = 'Object.setPath';
static $help = `
Evaluates the inner pipeline for each element and assigns the resulting value
to the input object at specified path.
The inner pipeline must resolve to a single element.
An error is thrown if input value is not an object or an array.
### See Also
- Move Path: for moving values to different keys without modification
- Transform Path: for modifying the value at specified path without moving it
### Use For
- building an object using values from various sources
- modifying or extending existing objects
`;
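    // Sketch of the behaviour described above (values illustrative): with
    // path '/price' and an inner pipeline resolving to 42, the input
    // { name: 'a' } becomes { name: 'a', price: 42 }; a null or undefined
    // result is handled according to the onNull policy below.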
@params.String({
source: 'dataPaths',
help: 'JSON pointer into input object where the new value is to be written.',
showInHeader: true,
})
path: string = '';
@params.Enum({
enum: ['bypass', 'discard', 'delete', 'set null'],
help: `
Specifies what action to take if the pipeline produces null or undefined:
- bypass: do not modify existing value
- discard: remove the element from output set
- set null: assign \`null\` at specified path
- delete: delete the object key at specified path
`,
})
onNull: string = 'bypass';
@params.Pipeline({
label: 'Value',
help: 'Pipeline for evaluating new value, executed per each element.',
})
pipeline!: Pipeline;
async apply(inputSet: Element[], ctx: RuntimeCtx): Promise<Element[]> {
const path = this.path;
util.assertScript(this.pipeline.length, 'No pipes defined');
return await this.map(inputSet, async el => {
util.checkType(el.value, 'object');
const result = await this.pipeline.selectOneOrNull([el], ctx);
const val = result ? result.value : null;
const newData = util.deepClone(el.value);
if (val == null) {
switch (this.onNull) {
                    case 'skip': // legacy alias of 'discard', not listed in the enum above
case 'discard':
return null;
case 'bypass':
return el;
case 'delete':
jsonPointer.set(newData, path, undefined);
return el.clone(newData);
case 'set null':
jsonPointer.set(newData, path, null);
return el.clone(newData);
}
}
jsonPointer.set(newData, path, val);
return el.clone(newData);
});
}
}
| ValueSetPath |
ssl_without_ca.js | // Must turn these off so we don't have CA file supplied automatically.
TestData.usex509 = false;
TestData.useSSL = false;
var SERVER_CERT = "jstests/libs/server.pem";
var CLIENT_CERT = "jstests/libs/client.pem";
var CLIENT_USER = "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client";
jsTest.log("Assert x509 auth is not allowed when a standalone mongod is run without a CA file.");
// allowSSL instead of requireSSL so that the non-SSL connection succeeds.
var conn = MongoRunner.runMongod({sslMode: 'allowSSL',
sslPEMKeyFile: SERVER_CERT,
auth: ''});
var external = conn.getDB('$external');
external.createUser({
user: CLIENT_USER,
roles: [
{'role':'userAdminAnyDatabase', 'db':'admin'},
{'role':'readWriteAnyDatabase', 'db':'admin'}
]});
// Should not be able to authenticate with x509.
// Authenticate call will return 1 on success, 0 on error.
var exitStatus = runMongoProgram('mongo', '--ssl', '--sslAllowInvalidCertificates',
'--sslPEMKeyFile', CLIENT_CERT,
'--port', conn.port,
'--eval', ('quit(db.getSisterDB("$external").auth({' +
'user: "' + CLIENT_USER + '" ,' +
'mechanism: "MONGODB-X509"}));'
));
assert.eq(exitStatus, 0, "authentication via MONGODB-X509 without CA succeeded");
MongoRunner.stopMongod(conn.port);
jsTest.log("Assert mongod doesn\'t start with CA file missing and clusterAuthMode=x509."); | var conn = MongoRunner.runMongod(sslParams);
assert.isnull(conn, "server started with x509 clusterAuthMode but no CA file");
jsTest.log("Assert mongos doesn\'t start with CA file missing and clusterAuthMode=x509.");
assert.throws(function() {
new ShardingTest({shards: 1, mongos: 1, verbose: 2,
other: {configOptions: sslParams,
mongosOptions: sslParams,
shardOptions: sslParams}});
},
null,
"mongos started with x509 clusterAuthMode but no CA file"); |
var sslParams = {clusterAuthMode: 'x509', sslMode: 'requireSSL', sslPEMKeyFile: SERVER_CERT}; |
test.rs | use native_tls::{Certificate, TlsConnector}; |
#[test]
fn connect() {
let cert = include_bytes!("../../test/server.crt");
let cert = Certificate::from_pem(cert).unwrap();
let mut builder = TlsConnector::builder().unwrap();
builder.add_root_certificate(cert).unwrap();
let connector = builder.build().unwrap();
let handshake = NativeTls::with_connector(connector);
let conn = Connection::connect(
"postgres://ssl_user@localhost:5433/postgres",
TlsMode::Require(&handshake),
).unwrap();
conn.execute("SELECT 1::VARCHAR", &[]).unwrap();
} | use postgres::{Connection, TlsMode};
use NativeTls; |
Markdown.tsx | import React, {
HTMLAttributes,
ReactElement,
useEffect,
useState,
} from "react";
import cn from "classnames";
import { Markdown as MarkdownRenderer } from "react-marked-renderer";
import { getLanguage, highlightCode } from "components/CodeBlock/utils";
import styles from "./Markdown.module.scss";
import { renderers } from "./renderers";
import { transformMarkdown } from "./utils";
function useMarkdownResolver(markdown: MarkdownProps["children"]): string {
/* eslint-disable react-hooks/rules-of-hooks */
  // Safe to disable: a given usage never swaps `children` between a string and a resolver
if (typeof markdown === "string") {
return transformMarkdown(markdown);
}
const [resolved, setResolved] = useState("");
useEffect(() => {
markdown().then((md) => {
if (typeof md === "string") {
setResolved(transformMarkdown(md));
} else if (typeof md.default === "string") {
setResolved(transformMarkdown(md.default));
}
});
}, [markdown]);
return resolved;
}
export type ResolveMarkdown = () => Promise<string | { default: string }>;
export type MarkdownChildren = string | ResolveMarkdown;
export interface MarkdownProps extends HTMLAttributes<HTMLDivElement> {
children: ResolveMarkdown | string; | export default function Markdown({
className,
children,
disableSinglePMargin,
...props
}: MarkdownProps): ReactElement {
const markdown = useMarkdownResolver(children);
return (
<>
<div
{...props}
className={cn(
styles.container,
{
[styles.marginless]: disableSinglePMargin,
},
className
)}
>
<MarkdownRenderer
markdown={markdown}
renderers={renderers}
getLanguage={getLanguage}
highlightCode={highlightCode}
/>
</div>
</>
);
} | disableSinglePMargin?: boolean;
}
|
main.go | package main
import (
"db-security-backend/config"
"log"
"db-security-backend/controller"
"github.com/gin-gonic/gin"
)
func main() {
cfg, err := config.ParseConfig("./config/engine.json")
if err != nil {
log.Fatal(err.Error())
return
}
_, err = config.OrmEngine(cfg)
if err != nil {
log.Fatal(err.Error())
return
}
engine := gin.Default()
RegisterRouter(engine)
_ = engine.Run(cfg.AppHost + ":" + cfg.AppPort)
}
func RegisterRouter(engine *gin.Engine) | {
new(controller.UserController).Router(engine)
new(controller.StaffController).Router(engine)
new(controller.IpController).Router(engine)
new(controller.DownloadRecordController).Router(engine)
} |
|
fatbrain.go | package fatbrain
import (
"errors"
"strings"
"time"
"strconv"
"sync"
"github.com/Sirupsen/logrus"
"github.com/wallnutkraken/fatbot/fatai"
"github.com/wallnutkraken/fatbot/fatdata"
"github.com/wallnutkraken/fatbot/fatplugin"
"github.com/wallnutkraken/telegogo"
)
const (
MinChainLength = 1
MaxChainLength = 3
MinMessageCountForMessaging = 100
MaxWordCount = 12
)
type FatBotBrain struct {
chain *fatai.LSTMWrapper
telegram TeleGogo.Client
refreshPeriod time.Duration
lastID int
inChats []int
database *fatdata.Data
messageCount int
messagingChannel chan bool
listeningChannel chan bool
continueMessaging bool
reactors []fatplugin.Reactor
cleaners []fatplugin.Cleaner
chainStatus *ChainStatus
chatMutex *sync.Mutex
}
type FatBotSettings struct {
TelegramKey string
RefreshPeriod time.Duration
Database *fatdata.Data
Chats []int
Cleaners []fatplugin.Cleaner
FatLSTM *fatai.LSTMWrapper
StartTraining bool
}
// New creates a new instance of FatBotBrain
func New(settings FatBotSettings) (*FatBotBrain, error) {
bot, err := TeleGogo.NewBot(settings.TelegramKey)
if err != nil {
return nil, err
}
brain := &FatBotBrain{
chain: settings.FatLSTM,
telegram: bot,
refreshPeriod: settings.RefreshPeriod,
inChats: settings.Chats,
database: settings.Database,
messageCount: 0,
continueMessaging: true,
reactors: make([]fatplugin.Reactor, 0),
cleaners: settings.Cleaners,
chatMutex: &sync.Mutex{},
chainStatus: newChainStatus(),
}
if settings.StartTraining {
logrus.WithError(err).Error("Failed loading memory model, starting training new one from database...")
if err := brain.TrainFor(time.Hour * 18); err != nil {
logrus.WithError(err).Fatal("Failed training")
}
}
return brain, nil
}
func (f *FatBotBrain) IsTraining() bool {
return f.chainStatus.IsTraining()
}
func (f *FatBotBrain) AddReactors(reactors []fatplugin.Reactor) {
f.reactors = append(f.reactors, reactors...)
}
func (f *FatBotBrain) Feed() error {
if !f.chainStatus.IsTraining() {
f.chainStatus.SetTraining(true)
msgs, err := f.database.GetMessages()
if err != nil {
return err
}
f.chain.StartTraining(msgs, func() {
f.chainStatus.SetTraining(false)
})
}
return nil
}
func (f *FatBotBrain) generate() string {
if f.IsTraining() {
return ""
}
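	// Keep only the first generated line, trimmed to MaxWordCount words below;
	// e.g. a 15-word first line is cut to its first 12 words.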
text := f.chain.Generate()
logrus.Infof("Generated message [%s] with [%d] newlines", text, strings.Count(text, "\n"))
firstLine := strings.Split(text, "\n")[0]
// Split into words
words := strings.Split(firstLine, " ")
if len(words) > MaxWordCount {
return strings.Join(words[:MaxWordCount], " ")
} else {
return firstLine
}
}
func (f *FatBotBrain) AddChat(chatID int) error {
for _, existingChatID := range f.inChats {
if existingChatID == chatID |
}
f.chatMutex.Lock()
f.inChats = append(f.inChats, chatID)
err := f.database.AddChat(chatID)
f.chatMutex.Unlock()
return err
}
func (f *FatBotBrain) StopTraining() {
if !f.chainStatus.IsTraining() && !f.chain.IsForTraining() {
return
}
if f.chain.IsForTraining() {
f.chain.StopTrainFor()
}
if f.chainStatus.IsTraining() {
f.chain.Stop()
}
f.chainStatus.SetTraining(false)
}
func (f *FatBotBrain) TrainFor(duration time.Duration) error {
if f.chainStatus.IsTraining() {
f.StopTraining()
}
f.chainStatus.SetTraining(true)
msgs, err := f.database.GetMessages()
if err != nil {
return err
}
f.chain.TrainFor(msgs, duration)
return nil
}
func (f *FatBotBrain) removeChat(chatID int) error {
	// Use -1 as the "not found" sentinel; index 0 is a valid position.
	chatIndex := -1
	for index, id := range f.inChats {
		if id == chatID {
			chatIndex = index
		}
	}
	if chatIndex == -1 {
		return errors.New("Could not find that chat")
	}
	f.chatMutex.Lock()
	// Remove the element at chatIndex by skipping it when re-joining the slice.
	f.inChats = append(f.inChats[:chatIndex], f.inChats[chatIndex+1:]...)
err := f.database.RemoveChat(chatID)
f.chatMutex.Unlock()
return err
}
func (f *FatBotBrain) Start() {
f.listeningChannel = f.startListening()
f.messagingChannel = f.startMessaging()
}
func (f *FatBotBrain) Stop() {
f.listeningChannel <- true
f.messagingChannel <- true
}
func (f *FatBotBrain) startMessaging() chan bool {
ch := make(chan bool, 0)
go func(f *FatBotBrain) {
for f.continueMessaging {
f.timedTrigger(ch)
}
}(f)
return ch
}
func (f *FatBotBrain) SendMessage(chatID int) error {
msgText := f.generate()
logrus.Infof("Sending message to [%d]: [%s]", chatID, msgText)
_, err := f.telegram.SendMessage(TeleGogo.SendMessageArgs{
ChatID: strconv.Itoa(chatID),
Text: msgText,
})
return err
}
func (f *FatBotBrain) startListening() chan bool {
ch := make(chan bool, 0)
go func(ch chan bool, brain *FatBotBrain) {
for {
select {
case <-ch:
return
			case <-time.After(time.Second * f.refreshPeriod): // refreshPeriod is used as a count of seconds
updates, err := f.telegram.GetUpdates(TeleGogo.GetUpdatesOptions{Offset: f.lastID + 1})
if err != nil {
logrus.WithError(err).Error("Failed getting updates")
continue
}
msgsToSave := make([]TeleGogo.Update, 0)
for _, update := range updates {
if update.Message.Text != "" {
logrus.Infof("Got message [%s]", update.Message.Text)
cleanText := update.Message.Text
for _, cleaner := range f.cleaners {
cleanText = cleaner.Clean(cleanText)
}
var reacted bool
for _, reactor := range f.reactors {
if reacted = reactor.React(update.Message.Chat.ID, cleanText); reacted {
// The bot reacted, continue
break
}
}
if !reacted {
msgsToSave = append(msgsToSave, update)
//f.Feed(cleanText)
}
} else {
continue
}
}
if len(updates) > 0 {
f.lastID = updates[len(updates)-1].ID
				go f.saveMessages(msgsToSave)
}
}
}
}(ch, f)
return ch
}
func (f *FatBotBrain) saveMessages(updates []TeleGogo.Update) {
for _, update := range updates {
cleanText := update.Message.Text
for _, cleaner := range f.cleaners {
cleanText = cleaner.Clean(cleanText)
}
if err := f.database.SaveMessage(cleanText); err != nil {
logrus.WithError(err).Errorf("Failed saving message [%s] to database", cleanText)
}
}
}
| {
return errors.New("Chat already added")
} |
lock-stat.go | /*
* Minio Cloud Storage, (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and |
package cmd
// lockStat - encapsulates total, blocked and granted lock counts.
type lockStat struct {
total int64
blocked int64
granted int64
}
// lockWaiting - updates lock stat when a lock becomes blocked.
func (ls *lockStat) lockWaiting() {
ls.blocked++
ls.total++
}
// lockGranted - updates lock stat when a lock is granted.
func (ls *lockStat) lockGranted() {
ls.blocked--
ls.granted++
}
// lockTimedOut - updates lock stat when a lock is timed out.
func (ls *lockStat) lockTimedOut() {
ls.blocked--
ls.total--
}
// lockRemoved - updates lock stat when a lock is removed, by Unlock
// or ForceUnlock.
func (ls *lockStat) lockRemoved(granted bool) {
	if granted {
		ls.granted--
	} else {
		ls.blocked--
	}
	ls.total--
} | * limitations under the License.
*/ |
test_code_climate_formatting.py | #!/usr/bin/env python3
import unittest
import unittest.mock
import json
from clang_tidy_converter import CodeClimateFormatter, ClangMessage
class CodeClimateFormatterTest(unittest.TestCase):
def test_format(self):
child1 = ClangMessage('/some/file/path1.cpp', 8, 10, ClangMessage.Level.NOTE, 'Allocated here', '', ['return new A;', ' ^'])
msg = ClangMessage('/some/file/path.cpp', 100, 2, ClangMessage.Level.WARNING, 'Memory leak', 'bugprone-undefined-memory-manipulation.SomethingWrong',
['void a(int)', ' ^'], [child1])
formatter = CodeClimateFormatter()
args = unittest.mock.Mock()
args.use_location_lines = True
self.assertEqual(
"""{
"type": "issue",
"check_name": "bugprone-undefined-memory-manipulation.SomethingWrong",
"description": "Memory leak",
"content": {
"body": "```\\nvoid a(int)\\n ^\\n/some/file/path1.cpp:8:10: Allocated here\\nreturn new A;\\n ^\\n```"
},
"categories": [
"Bug Risk" | "lines": {
"begin": 100
}
},
"trace": {
"locations": [
{
"path": "/some/file/path1.cpp",
"lines": {
"begin": 8
}
}
]
},
"severity": "major",
"fingerprint": "f2f6ccb970f2259d10e525b4b5805a5c"
}\0
""", formatter.format([msg], args))
def test_extract_content(self):
child1 = ClangMessage('/some/file/path1.cpp', 8, 10, ClangMessage.Level.NOTE, 'Allocated here', '', ['return new A;', ' ^'])
msg = ClangMessage('/some/file/path.cpp', 100, 2, ClangMessage.Level.WARNING, 'Memory leak', 'bugprone-undefined-memory-manipulation.SomethingWrong',
['void a(int)', ' ^'], [child1])
formatter = CodeClimateFormatter()
self.assertEqual({
'body': '\n'.join([
'```',
'void a(int)',
' ^',
'/some/file/path1.cpp:8:10: Allocated here',
'return new A;',
' ^',
'```'])
}, formatter._extract_content(msg, object()))
def test_extract_bug_risk_category(self):
self._test_diagnostic_category('bugprone-use-after-move', 'Bug Risk')
def test_extract_compatibility_category_1(self):
self._test_diagnostic_category('modernize-replace-auto-ptr', 'Compatibility')
def test_extract_compatibility_category_2(self):
self._test_diagnostic_category('portability-restrict-system-includes', 'Compatibility')
def test_extract_compatibility_category_3(self):
self._test_diagnostic_category('boost-use-to-string', 'Compatibility')
def test_extract_performance_category(self):
self._test_diagnostic_category('performance-inefficient-algorithm', 'Performance')
def test_extract_clarity_category_1(self):
self._test_diagnostic_category('google-readability-avoid-underscore-in-googletest-name', 'Clarity')
def test_extract_clarity_category_2(self):
self._test_diagnostic_category('readability-misplaced-array-index', 'Clarity')
def test_extract_security_category_1(self):
self._test_diagnostic_category('android-cloexec-open', 'Security')
def test_extract_security_category_2(self):
self._test_diagnostic_category('clang-analyzer-security.insecureAPI.bcmp', 'Security')
def test_extract_style_category_1(self):
self._test_diagnostic_category('readability-identifier-naming', 'Style')
def test_extract_style_category_2(self):
self._test_diagnostic_category('cppcoreguidelines-avoid-goto', 'Style')
def test_extract_style_category_3(self):
self._test_diagnostic_category('hicpp-no-assembler', 'Style')
def test_extract_complexity_category(self):
self._test_diagnostic_category('readability-simplify-boolean-expr', 'Complexity')
def test_extract_duplication_category(self):
self._test_diagnostic_category('misc-redundant-expression', 'Duplication')
def test_extract_default_category(self):
self._test_diagnostic_category('cert-dcl16-c', 'Bug Risk')
def _test_diagnostic_category(self, diagnostic, category):
msg = ClangMessage(diagnostic_name=diagnostic)
formatter = CodeClimateFormatter()
self.assertIn(category, formatter._extract_categories(msg, object()))
def test_extract_duplicated_categories(self):
msg = ClangMessage(diagnostic_name='cppcoreguidelines-readability-avoid-goto')
formatter = CodeClimateFormatter()
categories = formatter._extract_categories(msg, object())
self.assertEqual(2, len(categories))
self.assertIn('Style', categories)
self.assertIn('Clarity', categories)
def test_extract_trace_lines(self):
child1 = ClangMessage('/some/file/path1.cpp', 8, 10)
msg = ClangMessage('/some/file/path.cpp', 100, 2, children=[child1])
formatter = CodeClimateFormatter()
args = unittest.mock.Mock()
args.use_location_lines = True
self.assertEqual({
'locations': [
{
'path': '/some/file/path1.cpp',
'lines': {
'begin': 8
}
}
]
}, formatter._extract_trace(msg, args))
def test_extract_trace_positions(self):
child1 = ClangMessage('/some/file/path1.cpp', 8, 10)
msg = ClangMessage('/some/file/path.cpp', 100, 2, children=[child1])
formatter = CodeClimateFormatter()
args = unittest.mock.Mock()
args.use_location_lines = False
self.assertEqual({
'locations': [
{
'path': '/some/file/path1.cpp',
'positions': {
'begin': {
'line': 8,
'column': 10
}
}
}
]
}, formatter._extract_trace(msg, args))
def test_extract_location_lines(self):
msg = ClangMessage('/some/file/path.cpp', 100, 2)
formatter = CodeClimateFormatter()
args = unittest.mock.Mock()
args.use_location_lines = True
self.assertEqual({
'path': '/some/file/path.cpp',
'lines': {
'begin': 100
}
}, formatter._extract_location(msg, args))
def test_extract_location_positions(self):
msg = ClangMessage('/some/file/path.cpp', 100, 2)
formatter = CodeClimateFormatter()
args = unittest.mock.Mock()
args.use_location_lines = False
self.assertEqual({
'path': '/some/file/path.cpp',
'positions': {
'begin': {
'line': 100,
'column': 2
}
}
}, formatter._extract_location(msg, args))
def test_extracting_note_severity(self):
self._test_extracting_severity(ClangMessage.Level.NOTE, 'info')
def test_extracting_remark_severity(self):
self._test_extracting_severity(ClangMessage.Level.REMARK, 'minor')
def test_extracting_warning_severity(self):
self._test_extracting_severity(ClangMessage.Level.WARNING, 'major')
def test_extracting_error_severity(self):
self._test_extracting_severity(ClangMessage.Level.ERROR, 'critical')
def test_extracting_fatal_severity(self):
self._test_extracting_severity(ClangMessage.Level.FATAL, 'blocker')
def _test_extracting_severity(self, level, severity_str):
msg = ClangMessage(level=level)
formatter = CodeClimateFormatter()
self.assertEqual(severity_str, formatter._extract_severity(msg, object()))
def test_generate_fingerprint_reproducibility(self):
msg1 = ClangMessage('path1', line=1)
msg2 = ClangMessage('path1', line=1)
formatter = CodeClimateFormatter()
self.assertEqual(formatter._generate_fingerprint(msg1), formatter._generate_fingerprint(msg2))
def test_generate_fingerprint_uses_filepath(self):
self._test_fingerprints_different(ClangMessage('/path/to/file1.cpp'), ClangMessage('/path/to/file2.cpp'))
def test_generate_fingerprint_uses_line(self):
self._test_fingerprints_different(ClangMessage(line=1), ClangMessage(line=2))
def test_generate_fingerprint_uses_column(self):
self._test_fingerprints_different(ClangMessage(column=1), ClangMessage(column=2))
def test_generate_fingerprint_uses_message(self):
self._test_fingerprints_different(ClangMessage(message='A'), ClangMessage(message='B'))
def test_generate_fingerprint_uses_diagnostic_name(self):
self._test_fingerprints_different(ClangMessage(diagnostic_name='A'), ClangMessage(diagnostic_name='B'))
def test_generate_fingerprint_uses_children(self):
child1 = ClangMessage(line=1)
child2 = ClangMessage(line=2)
self._test_fingerprints_different(ClangMessage(children=[child1]), ClangMessage(children=[child2]))
def _test_fingerprints_different(self, msg1, msg2):
formatter = CodeClimateFormatter()
self.assertNotEqual(formatter._generate_fingerprint(msg1), formatter._generate_fingerprint(msg2)) | ],
"location": {
"path": "/some/file/path.cpp", |
password.rs | use argon2::{Config, ThreadMode, Variant, Version};
use std::error::Error;
pub fn | (password: Vec<u8>) -> Result<Vec<u8>, Box<dyn Error>> {
let salt = b"This Project is Dedicated to Aveesha.";
let config = Config {
variant: Variant::Argon2id,
version: Version::Version13,
mem_cost: 65536,
time_cost: 1,
lanes: 4,
thread_mode: ThreadMode::Parallel,
secret: &[],
ad: &[],
hash_length: 64,
};
    // hash_raw yields `hash_length` bytes on success; box the argon2 error on failure.
    argon2::hash_raw(&password, salt, &config).map_err(Into::into)
}
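// Usage sketch (input illustrative): hash_argon2(b"secret".to_vec()) returns
// a 64-byte Argon2id digest, matching `hash_length` in the config above.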
| hash_argon2 |
api.py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# Copyright 2012-2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import functools
import re
import string
import uuid
from oslo.config import cfg
import six
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.image import glance
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova.objects import aggregate as aggregate_obj
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.objects import instance_action
from nova.objects import instance_group as instance_group_obj
from nova.objects import instance_info_cache
from nova.objects import keypair as keypair_obj
from nova.objects import migration as migration_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.objects import service as service_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
import nova.policy
from nova import quota
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.BoolOpt('allow_migrate_to_same_host',
default=False,
help='Allow migrate machine to the same host. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='Availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='Kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(uuid)s',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
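# For example, with the default multi_instance_display_name_template of
# '%(name)s-%(uuid)s', an instance requested with name "web" gets a display
# name like "web-<uuid>" (uuid value illustrative).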
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
RO_SECURITY_GROUPS = ['default']
VIDEO_RAM = 'hw_video:ram_max_mb'
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
    started at least once, the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance['vm_state'] not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
if (task_state is not None and
instance['task_state'] not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method=f.__name__)
if must_have_launched and not instance['launched_at']:
raise exception.InstanceInvalidState(
attr=None,
not_launched=True,
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
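# A sketch of how the decorator above is typically applied (states
# illustrative):
#
#     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
#     def resize(self, context, instance, *args, **kwargs):
#         ...
#
# The wrapped method then raises InstanceInvalidState unless the instance's
# vm_state is ACTIVE or STOPPED (and, by default, it has launched).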
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance['host']:
raise exception.InstanceNotReady(instance_id=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance['locked'] and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
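# Worked example of the format described above:
#   _diff_dict({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   => {'a': ['-'], 'b': ['+', 3], 'c': ['+', 4]}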
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None, volume_api=None,
security_group_api=None, **kwargs):
self.image_service = (image_service or
glance.get_default_image_service())
self.network_api = network_api or network.API()
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver())
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('compute', CONF.host)
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance['cell_name']
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance['uuid'])
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance['uuid'],
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
instance_action.InstanceAction.action_start(context,
instance['uuid'],
action,
want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
QUOTAS.limit_check(context, injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
QUOTAS.limit_check(context, injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
req_ram = max_count * (instance_type['memory_mb'] + vram_mb)
# Check the quota
try:
reservations = QUOTAS.reserve(context, instances=max_count,
cores=req_cores, ram=req_ram)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // (instance_type['memory_mb'] +
vram_mb))
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. %(msg)s"),
params)
else:
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
raise exception.TooManyInstances(overs=overs,
req=requested[resource],
used=used, allowed=total_allowed,
resource=resource)
return max_count, reservations
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
QUOTAS.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
LOG.warn(_("Quota exceeded for %(pid)s, tried to set "
"%(num_metadata)s metadata properties"),
{'pid': context.project_id,
'num_metadata': num_metadata})
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in metadata.iteritems():
if not isinstance(k, six.string_types):
msg = _("Metadata property key '%s' is not a string.") % k
raise exception.InvalidMetadata(reason=msg)
if not isinstance(v, six.string_types):
msg = (_("Metadata property value '%(v)s' for key '%(k)s' is "
"not a string.") % {'v': v, 'k': k})
raise exception.InvalidMetadata(reason=msg)
if len(k) == 0:
msg = _("Metadata property key blank")
raise exception.InvalidMetadata(reason=msg)
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
return self.network_api.validate_networks(context, requested_networks,
max_count)
@staticmethod
def _handle_kernel_and_ramdisk(context, kernel_id, ramdisk_id, image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
image_service, kernel_id = glance.get_remote_image_service(
context, kernel_id)
image_service.show(context, kernel_id)
if ramdisk_id is not None:
image_service, ramdisk_id = glance.get_remote_image_service(
context, ramdisk_id)
image_service.show(context, ramdisk_id)
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
if forced_host:
check_policy(context, 'create:forced_host', {})
if forced_node:
check_policy(context, 'create:forced_host', {})
return availability_zone, forced_host, forced_node
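    # Examples of the legacy syntax handled above (names illustrative):
    #   "nova:host1"       -> zone "nova", forced host "host1"
    #   "nova::node1"      -> zone "nova", forced node "node1"
    #   "nova:host1:node1" -> zone "nova", forced host "host1", node "node1"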
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance['uuid'],
'name': instance['display_name'],
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance['display_name']
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
# Image checks don't apply when building from volume
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
def _check_and_transform_bdm(self, base_options, image_meta, min_count,
max_count, block_device_mapping, legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
# Get the block device mappings defined by the image.
image_defined_bdms = \
image_meta.get('properties', {}).get('block_device_mapping', [])
legacy_image_defined = not image_meta.get(
'properties', {}).get('bdm_v2', False)
if not legacy_image_defined:
image_defined_bdms = map(block_device.BlockDeviceDict,
image_defined_bdms)
if legacy_bdm:
if legacy_image_defined:
block_device_mapping += image_defined_bdms
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name)
else:
root_in_image_bdms = block_device.get_root_bdm(
image_defined_bdms) is not None
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name,
no_root=root_in_image_bdms) + image_defined_bdms
else:
# NOTE (ndipanov): client will insert an image mapping into the v2
# block_device_mapping, but if there is a bootable device in image
# mappings - we need to get rid of the inserted image.
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
root_in_image_bdms = block_device.get_root_bdm(
image_defined_bdms) is not None
if image_ref and root_in_image_bdms:
block_device_mapping = [bdm for bdm in block_device_mapping
if not (
bdm.get('source_type') == 'image'
and bdm.get('boot_index') == 0)]
block_device_mapping += image_defined_bdms
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_mapping
def _get_image(self, context, image_href):
if not image_href:
return None, {}
(image_service, image_id) = glance.get_remote_image_service(
context, image_href)
image = image_service.show(context, image_id)
return image_id, image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
if image_id is not None:
self._check_requested_image(context, image_id,
image, instance_type)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata, injected_files,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping,
auto_disk_config, reservation_id,
max_count):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files)
self._check_requested_secgroups(context, security_groups)
# Note: max_count is the number of instances requested by the user,
# max_network_count is the maximum number of instances taking into
# account any network quotas
max_network_count = self._check_requested_networks(context,
requested_networks, max_count)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name:
key_pair = keypair_obj.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.properties_root_device_name(
boot_meta.get('properties', {}))
system_metadata = flavors.save_flavor_info(
dict(), instance_type)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
# return the validated options and maximum number of instances allowed
# by the network quotas
return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
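    # Example of the mapping performed above, with hypothetical inputs:
    #     _build_filter_properties(ctxt, {'group': 'g1'}, 'host1', None,
    #                              flavor)
    # returns
    #     {'scheduler_hints': {'group': 'g1'}, 'instance_type': flavor,
    #      'force_hosts': ['host1']}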
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping):
# Reserve quotas
num_instances, quota_reservations = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug(_("Going to run %s instances...") % num_instances)
instances = []
try:
for i in xrange(num_instances):
instance = instance_obj.Instance()
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i)
instances.append(instance)
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
QUOTAS.rollback(context, quota_reservations)
# Commit the reservations
QUOTAS.commit(context, quota_reservations)
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if legacy_bdm and bdm.get('device_name') != 'vda':
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_service.show(context, image_id)
return image_meta.get('properties', {})
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif bdm.get('volume_id'):
try:
volume_id = bdm['volume_id']
volume = self.volume_api.get(context, volume_id)
return volume.get('volume_image_metadata', {})
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
return {}
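    # Example (hypothetical BDM list): with legacy_bdm=True only the
    # entry named 'vda' is considered, so
    #     [{'device_name': 'vda', 'volume_id': 'vol-1'}]
    # results in a volume_api.get() call and returns that volume's
    # 'volume_image_metadata' dict, or {} if the volume has none.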
@staticmethod
def _update_instance_group_by_name(context, instance_uuids, group_name):
try:
ig = instance_group_obj.InstanceGroup.get_by_name(context,
group_name)
instance_group_obj.InstanceGroup.add_members(context, ig.uuid,
instance_uuids)
except exception.InstanceGroupNotFound:
# NOTE(russellb) If the group does not already exist, we need to
# automatically create it to be backwards compatible with old
# handling of the 'group' scheduler hint. The policy type will be
# 'legacy', indicating that this group was created to emulate
# legacy group behavior.
ig = instance_group_obj.InstanceGroup()
ig.name = group_name
ig.project_id = context.project_id
ig.user_id = context.user_id
ig.policies = ['legacy']
ig.members = instance_uuids
ig.create(context)
@staticmethod
def _update_instance_group(context, instances, scheduler_hints):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
instance_uuids = [instance.uuid for instance in instances]
if uuidutils.is_uuid_like(group_hint):
instance_group_obj.InstanceGroup.add_members(context, group_hint,
instance_uuids)
else:
API._update_instance_group_by_name(context, instance_uuids,
group_hint)
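    # Example: a scheduler hint of {'group': <group uuid>} adds the new
    # instances directly to that existing group, while a non-uuid hint
    # such as {'group': 'my-group'} goes through
    # _update_instance_group_by_name, which creates a 'legacy' policy
    # group on first use.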
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = {}
boot_meta['properties'] = \
self._get_bdm_image_metadata(context,
block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
base_options, max_net_count = self._validate_and_build_base_options(
context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, injected_files, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
block_device_mapping, auto_disk_config, reservation_id,
max_count)
        # max_net_count is the maximum number of instances requested by the
        # user adjusted for any network quota constraints, including
        # consideration of connections to each requested network
if max_net_count == 0:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
LOG.debug(_("max count reduced from %(max_count)d to "
"%(max_net_count)d due to network port quota"),
{'max_count': max_count,
'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(
base_options, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host, forced_node, instance_type)
self._update_instance_group(context, instances, scheduler_hints)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
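    # Worked example for _volume_size, with a hypothetical flavor
    # itype = {'swap': 1, 'ephemeral_gb': 10}:
    #     _volume_size(itype, {'volume_size': 20})              -> 20
    #     _volume_size(itype, {'source_type': 'blank',
    #                          'guest_format': 'swap'})         -> 1
    #     _volume_size(itype, {'source_type': 'blank'})         -> 10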
def _prepare_image_mapping(self, instance_type, instance_uuid, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug(_("Image bdm %s"), bdm, instance_uuid=instance_uuid)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
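    # Example (hypothetical image mapping): an entry such as
    #     {'virtual': 'ephemeral0', 'device': 'sdb'}
    # becomes, after mappings_prepend_dev() normalizes the device name,
    # a blank local BDM roughly like
    #     {'device_name': '/dev/sdb', 'source_type': 'blank',
    #      'destination_type': 'local', 'device_type': 'disk',
    #      'guest_format': CONF.default_ephemeral_format,
    #      'delete_on_termination': True, 'boot_index': -1,
    #      'volume_size': <flavor ephemeral_gb>}
    # while 'ami'/'root' entries and zero-sized devices are dropped.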
def _update_block_device_mapping(self, elevated_context,
instance_type, instance_uuid,
block_device_mapping):
"""tell vm driver to attach volume at boot time by updating
BlockDeviceMapping
"""
LOG.debug(_("block_device_mapping %s"), block_device_mapping,
instance_uuid=instance_uuid)
for bdm in block_device_mapping:
bdm['volume_size'] = self._volume_size(instance_type, bdm)
if bdm.get('volume_size') == 0:
continue
bdm['instance_uuid'] = instance_uuid
self.db.block_device_mapping_update_or_create(elevated_context,
bdm,
legacy=False)
def _validate_bdm(self, context, instance, instance_type, all_mappings):
        def _subsequent_list(int_list):
            return all(el + 1 == int_list[i + 1]
                       for i, el in enumerate(int_list[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm['boot_index']
for bdm in all_mappings
if bdm.get('boot_index') is not None
and bdm.get('boot_index') >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.get('snapshot_id')
volume_id = bdm.get('volume_id')
image_id = bdm.get('image_id')
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
if (bdm['source_type'] == 'image' and
bdm['destination_type'] == 'volume' and
not bdm['volume_size']):
raise exception.InvalidBDM(message=_("Images with "
"destination_type 'volume' need to have a non-zero "
"size specified"))
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
self.volume_api.get_snapshot(context, snapshot_id)
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
ephemeral_size = sum(bdm.get('volume_size') or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = [bdm for bdm in all_mappings
if block_device.new_format_is_swap(bdm)]
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].get('volume_size') or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.get('destination_type') == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
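    # Example of the boot_index validation above: boot indexes [0] or
    # [0, 1] pass, while [1] (no index 0) or [0, 2] (gap in the
    # sequence) raise InvalidBDMBootSequence, since _subsequent_list
    # requires a contiguous run starting at 0.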
def _populate_instance_for_bdm(self, context, instance, instance_type,
image, block_device_mapping):
"""Populate instance block device mapping information."""
instance_uuid = instance['uuid']
image_properties = image.get('properties', {})
image_mapping = image_properties.get('mappings', [])
if image_mapping:
image_mapping = self._prepare_image_mapping(instance_type,
instance_uuid, image_mapping)
self._validate_bdm(context, instance, instance_type,
block_device_mapping + image_mapping)
for mapping in (image_mapping, block_device_mapping):
if not mapping:
continue
self._update_block_device_mapping(context,
instance_type, instance_uuid, mapping)
def _populate_instance_shutdown_terminate(self, instance, image,
block_device_mapping):
"""Populate instance shutdown_terminate information."""
image_properties = image.get('properties', {})
if (block_device_mapping or
image_properties.get('mappings') or
image_properties.get('block_device_mapping')):
instance.shutdown_terminate = False
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance['uuid'])
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
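    # Example: with num_instances=1 and no display_name set, an instance
    # with uuid 'abc' ends up with display_name 'Server abc' and a
    # hostname derived from it via utils.sanitize_hostname(). For
    # multi-instance requests the hostname is left unset here and is
    # built later from the templated display_name.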
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance['uuid'] = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = instance_info_cache.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance['system_metadata'] = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance['image_ref'])
instance['system_metadata'].update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
    # NOTE(bcwaldon): No policy check since this is only used by scheduler and
    # the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
self._populate_instance_shutdown_terminate(instance, image,
block_device_mapping)
self.security_group_api.ensure_default(context)
instance.create(context)
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._populate_instance_for_bdm(context, instance,
instance_type, image, block_device_mapping)
except exception.InvalidBDM:
with excutils.save_and_reraise_exception():
instance.destroy(context)
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
check_policy(context, 'create', target)
if requested_networks:
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for net, ip, port in requested_networks:
if port:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1 and utils.is_neutron():
self._check_multiple_instances_neutron_ports(requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
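    # A minimal usage sketch for create(), with hypothetical caller-side
    # names:
    #     instances, resv_id = compute_api.create(
    #         ctxt, instance_type=flavor, image_href=image_uuid,
    #         min_count=1, max_count=3, display_name='web')
    # Unset counts default to 1, security_group defaults to ['default'],
    # and the returned reservation_id ties the instances of one request
    # together.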
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
services = service_obj.ServiceList.get_all_by_topic(context,
CONF.compute_topic)
for service in services:
host_name = service.host
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
:param context: The security context
:param instance: The instance to update
:param kwargs: All additional keyword args are treated
as data fields of the instance to be
updated
:returns: A reference to the updated instance
"""
refs = self._update(context, instance, **kwargs)
return refs[1]
def _update(self, context, instance, **kwargs):
# Update the instance record and send a state update notification
# if task or vm state changed
old_ref, instance_ref = self.db.instance_update_and_get_original(
context, instance['uuid'], kwargs)
notifications.send_update(context, old_ref,
instance_ref, service="api")
return dict(old_ref.iteritems()), dict(instance_ref.iteritems())
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.disable_terminate:
LOG.info(_('instance termination disabled'),
instance=instance)
return
host = instance['host']
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
reservations = None
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
        # In these states the instance has an associated snapshot.
if instance['vm_state'] in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
snapshot_id = instance.system_metadata.get('shelved_image_id')
LOG.info(_("Working on deleting snapshot %s "
"from shelved instance..."),
snapshot_id, instance=instance)
try:
self.image_service.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning(_("Failed to delete snapshot "
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
            except Exception:
                LOG.exception(_("Something went wrong when trying to "
"delete snapshot from shelved instance."),
instance=instance)
original_task_state = instance.task_state
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
new_type_id = instance.instance_type_id
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
reservations = self._create_reservations(context,
instance,
new_type_id,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
if not host:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
return
except exception.ObjectActionError:
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_up = False
try:
service = service_obj.Service.get_by_compute_host(
context.elevated(), instance.host)
if self.servicegroup_api.service_is_up(service):
is_up = True
if original_task_state in (task_states.DELETING,
task_states.SOFT_DELETING):
LOG.info(_('Instance is already in deleting state, '
'ignoring this request'), instance=instance)
if reservations:
QUOTAS.rollback(context, reservations,
project_id=project_id,
user_id=user_id)
return
self._record_action_start(context, instance,
instance_actions.DELETE)
cb(context, instance, bdms, reservations=reservations)
except exception.ComputeHostNotFound:
pass
if not is_up:
# If compute node isn't up, just delete from DB
self._local_delete(context, instance, bdms, delete_type, cb)
if reservations:
QUOTAS.commit(context,
reservations,
project_id=project_id,
user_id=user_id)
reservations = None
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
except Exception:
with excutils.save_and_reraise_exception():
if reservations:
QUOTAS.rollback(context,
reservations,
project_id=project_id,
user_id=user_id)
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
mig_cls = migration_obj.Migration
migration = None
for status in ('finished', 'confirming'):
try:
migration = mig_cls.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s') %
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = self._downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_('Migration %s may have been confirmed during delete') %
migration.id, context=context, instance=instance)
return
quotas = self._reserve_quota_delta(context, deltas, instance)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, quotas.reservations,
cast=False)
def _create_reservations(self, context, old_instance, new_instance_type_id,
project_id, user_id):
instance_vcpus = old_instance['vcpus']
instance_memory_mb = old_instance['memory_mb']
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if old_instance['task_state'] in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
Migration = migration_obj.Migration
try:
migration = Migration.get_by_instance_and_status(
context.elevated(), old_instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
new_instance_type_id ==
migration.new_instance_type_id):
old_inst_type_id = migration.old_instance_type_id
try:
old_inst_type = flavors.get_flavor(old_inst_type_id)
except exception.FlavorNotFound:
LOG.warning(_("Flavor %d not found"), old_inst_type_id)
pass
else:
instance_vcpus = old_inst_type['vcpus']
vram_mb = int(old_inst_type['extra_specs']
.get(VIDEO_RAM, 0))
instance_memory_mb = (old_inst_type['memory_mb'] + vram_mb)
LOG.debug(_("going to delete a resizing instance"))
reservations = QUOTAS.reserve(context,
project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return reservations
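    # Worked example (hypothetical instance): deleting an instance with
    # vcpus=2 and memory_mb=4096 reserves the negative deltas
    #     instances=-1, cores=-2, ram=-4096
    # i.e. the quota usage that is released once the delete commits.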
def _local_delete(self, context, instance, bdms, delete_type, cb):
LOG.warning(_("instance's host %s is down, deleting from "
"database") % instance['host'], instance=instance)
instance.info_cache.delete()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
self.network_api.deallocate_for_instance(elevated,
instance)
# cleanup volumes
for bdm in bdms:
if bdm.is_volume:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(elevated, bdm.volume_id)
if bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
err_str = _("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
bdm.destroy(context)
cb(context, instance, bdms, local=True)
sys_meta = instance.system_metadata
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations)
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_('Going to try to soft delete instance'),
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate instance"), instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
flavor = instance.get_flavor()
num_instances, quota_reservations = self._check_num_instances_quota(
context, flavor, 1, 1)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance.host:
instance.task_state = task_states.RESTORING
instance.deleted_at = None
instance.save(expected_task_state=[None])
self.compute_rpcapi.restore_instance(context, instance)
else:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.deleted_at = None
instance.save(expected_task_state=[None])
QUOTAS.commit(context, quota_reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, quota_reservations)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED],
must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete_instance(context, instance)
def force_stop(self, context, instance, do_cast=True):
LOG.debug(_("Going to try to stop instance"), instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug(_("Going to try to start instance"), instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
        # TODO(yamahata): injected_files isn't supported right now.
        # It is used only for the OS API, not for the EC2 API.
        # availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
expected_attrs=None):
"""Get a single instance with the given instance_id."""
if not expected_attrs:
expected_attrs = []
expected_attrs.extend(['metadata', 'system_metadata',
'security_groups', 'info_cache'])
# NOTE(ameade): we still need to support integer ids for ec2
try:
if uuidutils.is_uuid_like(instance_id):
instance = instance_obj.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif utils.is_int_like(instance_id):
instance = instance_obj.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
raise exception.InstanceNotFound(instance_id=instance_id)
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, sort_key='created_at',
sort_dir='desc', limit=None, marker=None, want_objects=False,
expected_attrs=None):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be returned sorted in the order specified by the
'sort_dir' parameter using the key specified in the 'sort_key'
parameter.
"""
#TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
LOG.debug(_("Searching by: %s") % str(search_opts))
# Fixups for the DB call
filters = {}
        def _remap_flavor_filter(flavor_id):
flavor = flavor_obj.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in search_opts.iteritems():
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
inst_models = self._get_instances_by_filters(context, filters,
sort_key, sort_dir, limit=limit, marker=marker,
expected_attrs=expected_attrs)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
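    # Example of the search_opts remapping above, with hypothetical
    # options:
    #     {'name': 'web', 'fixed_ip': '10.0.0.5', 'power_state': 1}
    # produces DB filters roughly like
    #     {'display_name': 'web', 'ip': '^10\\.0\\.0\\.5$',
    #      'power_state': 1}
    # ('name' is remapped via a string entry, 'fixed_ip' via a helper
    # function, and unknown keys are copied through as-is).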
def _get_instances_by_filters(self, context, filters,
sort_key, sort_dir,
limit=None,
marker=None, expected_attrs=None):
if 'ip6' in filters or 'ip' in filters:
res = self.network_api.get_instance_uuids_by_ip_filter(context,
filters)
# NOTE(jkoelker) It is possible that we will get the same
# instance uuid twice (one for ipv4 and ipv6)
uuids = set([r['instance_uuid'] for r in res])
filters['uuid'] = uuids
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
if expected_attrs:
fields.extend(expected_attrs)
return instance_obj.InstanceList.get_by_filters(
context, filters=filters, sort_key=sort_key, sort_dir=sort_dir,
limit=limit, marker=marker, expected_attrs=fields)
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
        props_copy = dict(extra_properties or {}, backup_type=backup_type)
image_meta = self._create_image(context, instance, name,
'backup', extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
if extra_properties is None:
extra_properties = {}
instance_uuid = instance['uuid']
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
image_ref = instance.image_ref
sent_meta = compute_utils.get_image_metadata(
context, self.image_service, image_ref, instance)
sent_meta['name'] = name
sent_meta['is_public'] = False
# The properties set up above and in extra_properties have precedence
properties.update(extra_properties or {})
sent_meta['properties'].update(properties)
return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.db.sqlalchemy.models.Instance
:param image_meta: metadata for the new image
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta['name'] = name
image_meta['is_public'] = False
properties = image_meta['properties']
if instance['root_device_name']:
properties['root_device_name'] = instance['root_device_name']
properties.update(extra_properties or {})
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
mapping = []
for bdm in bdms:
if bdm.no_device:
continue
if bdm.is_volume:
# create snapshot based on volume_id
volume = self.volume_api.get(context, bdm.volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
bdm)
mapping_dict = mapping_dict.get_image_mapping()
else:
mapping_dict = bdm.get_image_mapping()
mapping.append(mapping_dict)
# NOTE (ndipanov): Remove swap/ephemerals from mappings as they will be
# in the block_device_mapping for the new image.
image_mappings = properties.get('mappings')
if image_mappings:
properties['mappings'] = [m for m in image_mappings
if not block_device.is_swap_or_ephemeral(
m['virtual'])]
if mapping:
properties['block_device_mapping'] = mapping
properties['bdm_v2'] = True
for attr in ('status', 'location', 'id'):
image_meta.pop(attr, None)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
return self.image_service.create(context, image_meta, data='')
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=set(
vm_states.ALLOW_SOFT_REBOOT + vm_states.ALLOW_HARD_REBOOT),
task_state=[None, task_states.REBOOTING,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance['vm_state'] not in vm_states.ALLOW_SOFT_REBOOT)):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance['uuid'],
state=instance['vm_state'],
method='reboot')
if ((reboot_type == 'SOFT' and
instance['task_state'] in
(task_states.REBOOTING, task_states.REBOOTING_HARD)) or
(reboot_type == 'HARD' and
instance['task_state'] == task_states.REBOOTING_HARD)):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance['uuid'],
state=instance['task_state'],
method='reboot')
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=[None, task_states.REBOOTING])
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance.image_ref or ''
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
auto_disk_config = kwargs.get('auto_disk_config')
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
flavor = instance.get_flavor()
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that if
# the system_metadata for this instance is updated after
# we do the previous save() and before we update.. those
# other updates will be lost. Since this problem exists in
# a lot of other places, I think it should be addressed in
# a DB layer overhaul.
orig_sys_metadata = dict(instance.system_metadata)
# Remove the old keys
for key in instance.system_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del instance.system_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
# Since image might have changed, we may have new values for
# os_type, vm_mode, etc
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
instance.update(options_from_image)
instance.task_state = task_states.REBUILDING
instance.image_ref = image_href
instance.kernel_id = kernel_id or ""
instance.ramdisk_id = ramdisk_id or ""
instance.progress = 0
instance.update(kwargs)
instance.save(expected_task_state=[None])
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_rpcapi.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = self._reverse_upsize_quota_delta(context, migration)
quotas = self._reserve_quota_delta(context, deltas, instance)
instance.task_state = task_states.RESIZE_REVERTING
try:
instance.save(expected_task_state=[None])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback(context)
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit(context)
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = migration_obj.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = self._downsize_quota_delta(context, instance)
quotas = self._reserve_quota_delta(context, deltas, instance)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit(context)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
quotas.reservations or [])
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
"""Calculate any quota adjustment required at a particular point
in the resize cycle.
        :param context: the request context
        :param new_flavor: the target flavor
        :param old_flavor: the original flavor
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
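    # Worked example, with hypothetical flavors
    #     old_flavor = {'vcpus': 2, 'memory_mb': 4096}
    #     new_flavor = {'vcpus': 4, 'memory_mb': 8192}:
    # the upsize check (sense=1, compare=1) yields
    #     {'cores': 2, 'ram': 4096}
    # the downsize check (sense=1, compare=-1) yields {} since no delta
    # is negative, and reversing the upsize (sense=-1, compare=-1)
    # yields {'cores': -2, 'ram': -4096}.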
@staticmethod
def _upsize_quota_delta(context, new_flavor, old_flavor):
"""Calculate deltas required to adjust quota for an instance upsize.
"""
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
@staticmethod
def _reverse_upsize_quota_delta(context, migration_ref):
"""Calculate deltas required to reverse a prior upsizing
quota adjustment.
"""
old_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['old_instance_type_id'])
new_flavor = flavor_obj.Flavor.get_by_id(
context, migration_ref['new_instance_type_id'])
return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
@staticmethod
def _downsize_quota_delta(context, instance):
"""Calculate deltas required to adjust quota for an instance downsize.
"""
old_flavor = instance.get_flavor('old')
new_flavor = instance.get_flavor('new')
return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
@staticmethod
def _reserve_quota_delta(context, deltas, instance):
"""If there are deltas to reserve, construct a Quotas object and
reserve the deltas for the given project.
@param context: The nova request context.
@param deltas: A dictionary of the proposed delta changes.
@param instance: The instance we're operating on, so that
quotas can use the correct project_id/user_id.
@return: nova.objects.quotas.Quotas
"""
quotas = quotas_obj.Quotas()
if deltas:
project_id, user_id = quotas_obj.ids_from_instance(context,
instance)
quotas.reserve(context, project_id=project_id, user_id=user_id,
**deltas)
return quotas
@staticmethod
def _resize_cells_support(context, quotas, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# With cells, the best we can do right now is commit the
# reservations immediately...
quotas.commit(context)
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = migration_obj.Migration()
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.create(context.elevated())
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = flavors.extract_flavor(instance)
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug(_("flavor_id is None. Assuming migration."),
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s"),
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
try:
quotas = self._reserve_quota_delta(context, deltas, instance)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
headroom = exc.kwargs['headroom']
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used, allowed=total_allowed,
resource=resource)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
        # When flavor_id is None, the process is considered a migration.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance['host'])
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, quotas, instance,
current_instance_type,
new_instance_type)
self._record_action_start(context, instance, instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type,
reservations=quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED],
task_state=[None])
def shelve(self, context, instance):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
image_id = None
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance['display_name']
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED], task_state=[None])
def shelve_offload(self, context, instance):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self.compute_rpcapi.shelve_offload_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED], task_state=[None])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None):
"""Rescue the given instance."""
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance['uuid'])
for bdm in bdms:
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance['uuid'],
reason=reason)
self.update(context,
instance,
task_state=task_states.RESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password, rescue_image_ref=rescue_image_ref)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
self.update(context,
instance,
task_state=task_states.UNRESCUING,
expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
self.update(context,
instance,
task_state=task_states.UPDATING_PASSWORD,
expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_rdp_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance['uuid'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# An already-locked instance is only re-locked when the caller is an
# admin (non-owner) taking over the lock; an owner re-lock is a no-op.
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug(_('Locking'), context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug(_('Unlocking'), context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance.
This method is separated to make it possible for cells version
to override it.
"""
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
device = self.compute_rpcapi.reserve_block_device_name(
context, device=device, instance=instance, volume_id=volume_id,
disk_bus=disk_bus, device_type=device_type)
volume_bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device, bdm=volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
volume_bdm.destroy(context)
return device
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED],
task_state=[None])
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
return self._attach_volume(context, instance, volume_id, device,
disk_bus, device_type)
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance.
This method is separated to make it easier for cells version
to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED],
task_state=[None])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance['uuid']:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED],
task_state=[None])
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance['uuid']:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception: # pylint: disable=W0702
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
@check_instance_lock
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
@check_instance_lock
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
rv = self.db.instance_metadata_get(context, instance['uuid'])
return dict(rv.iteritems())
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
def _match_any(pattern_list, string):
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(instance, search_filt, input_metadata):
uuids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if uuids and instance.get('uuid') not in uuids:
return {}
for (k, v) in input_metadata.iteritems():
# Both keys and value defined -- AND
if ((keys_filter and values_filter) and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
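# Example: search_filt {'key': ['colou?r'], 'value': ['bl.*']} applied to
# metadata {'color': 'blue', 'size': 'big'} keeps 'color' (key and value
# patterns both match) and drops 'size'; the caller below chains filters,
# so successive search_filts are effectively ANDed together.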
formatted_metadata_list = []
instances = self._get_instances_by_filters(context, filters={},
sort_key='created_at',
sort_dir='desc')
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
metadata = instance.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(instance, filt, metadata)
for (k, v) in metadata.iteritems():
formatted_metadata_list.append({'key': k, 'value': v,
'instance_id': instance.get('uuid')})
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return formatted_metadata_list
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
instance.delete_metadata_key(key)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = dict(instance.metadata)
if delete:
_metadata = metadata
else:
_metadata = dict(instance.metadata)
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
instance.metadata = _metadata
instance.save()
diff = _diff_dict(orig, instance.metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance['uuid'] for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def is_volume_backed_instance(self, context, instance, bdms=None):
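# An empty image_ref is what marks a boot-from-volume instance, so such
# instances are reported as volume-backed without loading the BDMs.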
if not instance['image_ref']:
return True
if bdms is None:
bdms = block_device_obj.BlockDeviceMappingList.\
get_by_instance_uuid(context, instance['uuid'])
root_bdm = bdms.root_bdm()
if not root_bdm:
return False
return root_bdm.is_volume
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
"""
LOG.debug(_('vm evacuation scheduled'))
inst_host = instance['host']
service = service_obj.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
msg = (_('Instance compute service state on %s '
'expected to be down, but it was up.') % inst_host)
LOG.error(msg)
raise exception.ComputeServiceInUse(host=inst_host)
instance = self.update(context, instance, expected_task_state=[None],
task_state=task_states.REBUILDING)
self._record_action_start(context, instance, instance_actions.EVACUATE)
# NOTE(danms): Transitional until evacuate supports objects
inst_obj = instance_obj.Instance._from_db_object(
context, instance_obj.Instance(), instance,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
return self.compute_rpcapi.rebuild_instance(context,
instance=inst_obj,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return migration_obj.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
for host in instances_by_host:
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
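# Example of the collation above: events for instances living on hosts A
# and B fan out as two RPC calls, one per host, each carrying only that
# host's instances and events.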
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = service_obj.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
attributes and matching values. Ie, to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = service_obj.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in filters.iteritems():
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return service_obj.Service.get_by_compute_host(context, host_name)
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
service = service_obj.Service.get_by_args(context, host_name,
binary)
service.update(params_to_update)
service.save()
return service
def service_delete(self, context, service_id):
"""Deletes the specified service."""
service_obj.Service.get_by_id(context, service_id).destroy()
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return self.db.instance_get_all_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return self.db.compute_node_get(context, int(compute_id))
def compute_node_get_all(self, context):
return self.db.compute_node_get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return self.db.compute_node_search_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return instance_action.InstanceActionList.get_by_instance_uuid(
context, instance['uuid'])
def action_get_by_request_id(self, context, instance, request_id):
return instance_action.InstanceAction.get_by_request_id(
context, instance['uuid'], request_id)
def action_events_get(self, context, instance, action_id):
return instance_action.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create(context)
aggregate = self._reformat_aggregate_info(aggregate)
# To maintain the same API result as before.
del aggregate['hosts']
del aggregate['metadata']
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
return self._reformat_aggregate_info(aggregate)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
aggregates = aggregate_obj.AggregateList.get_all(context)
return [self._reformat_aggregate_info(agg) for agg in aggregates]
def is_safe_to_update_az(self, context, aggregate, metadata,
action_name):
"""Determine if updates alter an aggregate's availability zone."""
if 'availability_zone' in metadata:
aggregate_az = aggregate.metadata.get("availability_zone")
for host in aggregate.hosts:
host_az = availability_zones.get_host_availability_zone(
context, host)
if (host_az and host_az != metadata["availability_zone"]
and host_az != CONF.default_availability_zone and
host_az != aggregate_az):
msg = _("This aggregate contains hosts in"
" an existing availability zone")
raise exception.InvalidAggregateAction(
action=action_name,
aggregate_id=aggregate.id,
reason=msg)
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
self.is_safe_to_update_az(context, aggregate,
values, "update aggregate")
if values:
aggregate.metadata = values
aggregate.save()
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, aggregate,
metadata, "update aggregate metadata")
aggregate.update_metadata(metadata)
# If updated metadata include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if metadata and metadata.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = aggregate_obj.Aggregate.get_by_id(context,
aggregate_id)
if len(aggregate.hosts) > 0:
msg = _("Host aggregate is not empty")
raise exception.InvalidAggregateAction(action='delete',
aggregate_id=aggregate_id,
reason=msg)
aggregate.destroy()
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def _check_az_for_host(self, aggregate_meta, host_az, aggregate_id):
# NOTE(mtreinish) The availability_zone key returns a set of
# zones so loop over each zone. However there should only
# ever be one zone in the set because an aggregate can only
# have a single availability zone set at one time.
for aggregate_az in aggregate_meta["availability_zone"]:
# NOTE(mtreinish) Ensure that the aggregate_az is not none
# if it is none then that is just a regular aggregate and
# it is valid to have a host in multiple aggregates.
if aggregate_az and aggregate_az != host_az:
msg = _("Host already in availability zone "
"%s") % host_az
action_name = "add_host_to_aggregate"
raise exception.InvalidAggregateAction(
action=action_name, aggregate_id=aggregate_id,
reason=msg)
def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
# Update the availability_zone cache to avoid getting wrong
# availability_zone in cache retention time when add/remove
# host to/from aggregate.
if aggregate_meta and aggregate_meta.get('availability_zone'):
availability_zones.update_host_availability_zone_cache(context,
host_name)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
host_az = availability_zones.get_host_availability_zone(context,
host_name)
if host_az and host_az != CONF.default_availability_zone:
aggregate_meta = self.db.aggregate_metadata_get_by_metadata_key(
context, aggregate_id, 'availability_zone')
if aggregate_meta.get("availability_zone"):
self._check_az_for_host(aggregate_meta, host_az, aggregate_id)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.add_host(context, host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
service_obj.Service.get_by_compute_host(context, host_name)
aggregate = aggregate_obj.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return self._reformat_aggregate_info(aggregate)
def _reformat_aggregate_info(self, aggregate):
"""Builds a dictionary with aggregate props, metadata and hosts."""
return dict(aggregate.iteritems())
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
get_notifier = functools.partial(rpc.get_notifier, service='api')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = self.get_notifier()
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
if not 0 < len(key_name) < 256:
raise exception.InvalidKeypair(
reason=_('Keypair name must be between '
'1 and 255 characters long'))
count = QUOTAS.count(context, 'key_pairs', user_id)
try:
QUOTAS.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
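# Example: 'my-key_1' passes the safe-character check above, while
# 'bad/key' is rejected because '/' is stripped by the join and the
# cleaned value no longer equals the original name.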
@wrap_exception()
def import_key_pair(self, context, user_id, key_name, public_key):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'import.start', key_name)
fingerprint = crypto.generate_fingerprint(public_key)
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'import.end', key_name)
return keypair
@wrap_exception()
def create_key_pair(self, context, user_id, key_name):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = crypto.generate_key_pair()
keypair = keypair_obj.KeyPair()
keypair.user_id = user_id
keypair.name = key_name
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create(context)
self._notify(context, 'create.end', key_name)
return keypair, private_key
@wrap_exception()
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
keypair_obj.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return keypair_obj.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return keypair_obj.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.security_group_rpcapi = compute_rpcapi.SecurityGroupAPI()
def validate_property(self, value, property, allowed):
"""Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
if not val:
msg = _("Security group %s cannot be empty.") % property
self.raise_invalid_property(msg)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
if len(val) > 255:
msg = _("Security group %s should not be greater "
"than 255 characters.") % property
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
try:
reservations = QUOTAS.reserve(context, security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.audit(_("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
QUOTAS.commit(context, reservations)
except Exception:
with excutils.save_and_reraise_exception():
QUOTAS.rollback(context, reservations)
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
quotas = quotas_obj.Quotas()
quota_project, quota_user = quotas_obj.ids_from_security_group(
context, security_group)
try:
quotas.reserve(context, project_id=quota_project,
user_id=quota_user, security_groups=-1)
except Exception:
LOG.exception(_("Failed to update usages deallocating "
"security group"))
LOG.audit(_("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
quotas.commit()
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance['uuid']
#check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance['host'])
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = QUOTAS.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
QUOTAS.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Authorize security group ingress %s")
LOG.audit(msg, name, context=context)
rules = [self.db.security_group_rule_create(context, v) for v in vals]
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Revoke security group ingress %s")
LOG.audit(msg, security_group['name'], context=context)
for rule_id in rule_ids:
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
is_duplicate = True
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
is_duplicate = False
break
if is_duplicate:
return rule.get('id') or True
return False
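# Example: with an existing default rule {'id': 7, 'cidr': '0.0.0.0/0',
# 'from_port': 22, 'to_port': 22, 'protocol': 'tcp'}, passing the same
# four values returns 7, while changing any one of them returns False.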
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
try:
return self.db.security_group_default_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
security_group = self.db.security_group_get(
context, id, columns_to_join=['instances'])
for instance in security_group['instances']:
if instance['host'] is not None:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
# First, we get the security group rules that reference these groups as
# the grantee..
security_group_rules = set()
for group_id in group_ids:
security_group_rules.update(
self.db.security_group_rule_get_by_security_group_grantee(
context,
group_id))
# ..then we distill the rules into the groups to which they belong..
security_groups = set()
for rule in security_group_rules:
security_group = self.db.security_group_get(
context, rule['parent_group_id'],
columns_to_join=['instances'])
security_groups.add(security_group)
# ..then we find the instances that are members of these groups..
instances = {}
for security_group in security_groups:
for instance in security_group['instances']:
if instance['uuid'] not in instances:
instances[instance['uuid']] = instance
# ..then we send a request to refresh the rules for each instance.
for instance in instances.values():
if instance['host']:
self.security_group_rpcapi.refresh_instance_security_rules(
context, instance['host'], instance)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = self.db.instance_get_by_uuid(context, instance_uuid)
groups = instance.get('security_groups')
if groups:
return [{'name': group['name']} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
| _remap_flavor_filter |
aggregator.rs | use crate::statsd::{Message, MessageKind};
use std::collections::HashMap;
use std::fmt;
use std::io::Write;
use std::time::{SystemTime, UNIX_EPOCH};
#[derive(PartialEq, Debug)]
enum Data {
Count(f64),
}
impl fmt::Display for Data {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Data::Count(v) => write!(f, "{}", v),
}
}
}
struct Metric {
key: String,
kind: MessageKind,
data: Data,
}
impl Metric {
fn new(key: &str, kind: MessageKind) -> Metric {
Metric {
key: key.to_string(),
kind,
data: match kind {
MessageKind::Counter => Data::Count(0.0),
},
}
}
fn record(&mut self, value: f64) {
// TODO should I check kind here as well?
self.data = match self.data {
Data::Count(v) => Data::Count(v + value),
};
}
fn flush<T: Write>(&mut self, output: &mut T, timestamp: u64) {
match self.kind {
MessageKind::Counter => match self.data {
Data::Count(v) => {
let _ = writeln!(output, "{} {} {}", self.key, v, timestamp);
self.data = Data::Count(0.0);
}
},
}
}
}
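// The line flushed above ("<key> <value> <unix-timestamp>\n") matches the
// Graphite/Carbon plaintext protocol, e.g. `requests.count 42 1712345678`,
// so the output can be piped straight to a carbon listener.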
pub struct Aggregator {
data: HashMap<String, Metric>,
}
impl Aggregator {
pub fn new() -> Aggregator {
Aggregator {
data: HashMap::new(),
}
}
pub fn handle(&mut self, msg: &Message) {
match self.data.get_mut(msg.key) {
Some(m) => {
// TODO what happens when the metric type doesn't match?
if msg.kind != m.kind |
m.record(msg.value);
}
None => {
let mut metric = Metric::new(msg.key, msg.kind);
metric.record(msg.value);
self.data.insert(msg.key.to_string(), metric);
}
}
}
#[allow(dead_code)]
pub fn dump(&self) {
println!("Aggregator stats:");
for (key, metric) in &self.data {
println!("{} ({:?}): {}", key, metric.kind, metric.data);
}
}
pub fn flush<T: Write>(&mut self, output: &mut T) {
let start = SystemTime::now();
let unix = start.duration_since(UNIX_EPOCH).unwrap();
if self.data.is_empty() {
return;
}
println!("Flushing {} metrics...", self.data.len());
for metric in self.data.values_mut() {
metric.flush(output, unix.as_secs());
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn counter_record() {
let mut metric = Metric::new("test.key", MessageKind::Counter);
assert_eq!(metric.data, Data::Count(0.0));
metric.record(42.0);
metric.record(-20.0);
assert_eq!(metric.data, Data::Count(22.0));
}
#[test]
fn counter_flush() {
let mut metric = Metric::new("test.key", MessageKind::Counter);
metric.record(23.45);
let mut output = vec![];
metric.flush(&mut output, 12345);
assert_eq!(output, "test.key 23.45 12345\n".as_bytes());
assert_eq!(metric.data, Data::Count(0.0));
}
}
| {
return;
} |
copyStateHelper.js | export default function deepCopy (objectpassed) {
// arrays return "object" in js so any object or array will be looped to copy deeply.
// strings, numbers and booleans are deep copied naturally, while any objects within arrays or objects
// or objects within arrays are only copied shallowly.
if (objectpassed === null || typeof objectpassed !== 'object') {
return objectpassed | var temporaryStorage = objectpassed.constructor()
for (var key in objectpassed) {
temporaryStorage[key] = deepCopy((objectpassed[key]))
}
return temporaryStorage
} | }
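// Illustrative usage: deepCopy({a: {b: 1}, c: [2, 3]}) returns a structurally
// equal value whose nested object and array are fresh instances, so mutating
// the copy never touches the original state.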
|
monitoring.go | /* Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package monitoring
import (
ctx "context"
"errors"
"fmt"
"io/ioutil"
"os"
"regexp"
"strconv"
"strings"
"time"
log "github.com/golang/glog"
"github.com/google/gnxi/utils/xpath"
"github.com/ramonfontes/link022/agent/context"
"github.com/ramonfontes/link022/agent/gnmi"
"github.com/ramonfontes/link022/agent/syscmd"
)
const (
statesUpdateDelay = 15 * time.Second
systemClockTick = 100
physicalMemoryPath = "/access-points/access-point[hostname=$hostname]/system/memory/state/physical"
cpuUsagePath = "/access-points/access-point[hostname=$hostname]/system/cpus/cpu[index=$index]/state/total/instant"
channelPath = "/access-points/access-point[hostname=$hostname]/radios/radio[id=$id]/state/channel"
widthPath = "/access-points/access-point[hostname=$hostname]/radios/radio[id=$id]/state/channel-width"
frequencyPath = "/access-points/access-point[hostname=$hostname]/radios/radio[id=$id]/state/operating-frequency"
txpowerPath = "/access-points/access-point[hostname=$hostname]/radios/radio[id=$id]/state/transmit-power"
selfMemPath = "/access-points/access-point[hostname=$hostname]/system/processes/process[pid=$pid]/state/memory-usage"
selfCPUPath = "/access-points/access-point[hostname=$hostname]/system/processes/process[pid=$pid]/state/cpu-utilization"
)
var cmdRunner = syscmd.Runner()
// UpdateDeviceStatus periodically collect AP device stats
// and update their corresponding nodes in OpenConfig Model tree.
func UpdateDeviceStatus(bkgdContext ctx.Context, gnmiServer *gnmi.Server) {
deviceConfig := context.GetDeviceConfig()
hostName := deviceConfig.Hostname
wLANINTFName := deviceConfig.WLANINTFName
for {
select {
case <-bkgdContext.Done():
return
case <-time.After(statesUpdateDelay):
}
if err := updateMemoryInfo(gnmiServer, hostName); err != nil {
log.Errorf("Error in updating memory info: %v", err)
}
if err := updateCPUInfo(gnmiServer, hostName); err != nil {
log.Errorf("Error in updating CPU info: %v", err)
}
if err := updateAPInfo(gnmiServer, hostName, wLANINTFName); err != nil {
log.Errorf("Error in updating AP info: %v", err)
}
}
}
func updateMemoryInfo(s *gnmi.Server, hostName string) error {
b, err := ioutil.ReadFile("/proc/meminfo")
if err != nil {
return err
}
memStr := string(b)
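// A matching /proc/meminfo line looks like "MemTotal: 16342452 kB"; the
// capture group grabs the kB figure, converted to bytes further down.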
reTotal := regexp.MustCompile("MemTotal:\\s+(\\d+)")
match := reTotal.FindStringSubmatch(memStr)
if len(match) != 2 {
return errors.New("No MemTotal info in /proc/meminfo")
}
}
strPath := strings.Replace(physicalMemoryPath, "$hostname", hostName, 1)
pbPath, err := xpath.ToGNMIPath(strPath)
if err != nil {
return err
}
physicalMemory, err := strconv.ParseInt(match[1], 10, 64)
if err != nil {
return err
}
stateOpt := gnmi.GNXIStateOptGenerator(pbPath, uint64(physicalMemory*1024), gnmi.InternalUpdateState)
if err = s.InternalUpdate(stateOpt); err != nil {
return err
}
pid := os.Getpid()
spid := fmt.Sprint(pid)
filePath := fmt.Sprintf("/proc/%v/status", pid)
b, err = ioutil.ReadFile(filePath)
if err != nil {
log.Errorf("failed open %v: %v", filePath, err)
return err
}
memStr = string(b)
reSelfMem := regexp.MustCompile("VmRSS:\\s+(\\d+)")
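// The /proc/<pid>/status line of interest looks like "VmRSS:  14532 kB".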
match = reSelfMem.FindStringSubmatch(memStr)
if len(match) != 2 {
return fmt.Errorf("No Memory info in: %v", filePath)
}
p := strings.Replace(selfMemPath, "$pid", spid, 1)
p = strings.Replace(p, "$hostname", hostName, 1)
pbPath, err = xpath.ToGNMIPath(p)
if err != nil {
return err
}
selfMemory, err := strconv.ParseInt(match[1], 10, 64)
if err != nil {
return err
}
stateOpt = gnmi.GNXIStateOptGenerator(pbPath, uint64(selfMemory*1024), gnmi.InternalUpdateState)
if err = s.InternalUpdate(stateOpt); err != nil {
log.Errorf("update state failed: %v", err)
return err
}
return nil
}
func updateCPUInfo(s *gnmi.Server, hostName string) error {
pid := os.Getpid()
spid := fmt.Sprint(pid)
filePath := fmt.Sprintf("/proc/%v/stat", pid)
b0, err := ioutil.ReadFile(filePath)
if err != nil {
log.Errorf("failed open %v: %v", filePath, err)
return err
}
time.Sleep(1 * time.Second)
b1, err := ioutil.ReadFile(filePath)
if err != nil {
log.Errorf("failed open %v: %v", filePath, err)
return err
}
cpuStr0 := strings.Split(string(b0), " ")
cpuStr1 := strings.Split(string(b1), " ")
if len(cpuStr0) < 14 || len(cpuStr1) < 14 {
return errors.New("cpu info not correct")
}
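// Field 14 of /proc/<pid>/stat (index 13 after splitting on spaces) is
// utime: the time this process has spent in user mode, in clock ticks.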
up0, err := strconv.ParseInt(cpuStr0[13], 10, 64)
if err != nil {
log.Errorf("failed convert string to int: %v", err)
return err
}
up1, err := strconv.ParseInt(cpuStr1[13], 10, 64)
if err != nil {
log.Errorf("failed convert string to int: %v", err)
return err
}
cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo")
if err != nil {
log.Errorf("failed open %v: %v", "/proc/cpuinfo", err)
return err
}
cpuCount := strings.Count(string(cpuinfo), "processor")
cpuUtil := (up1 - up0) / (systemClockTick * int64(cpuCount))
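// Note the integer division: over a 1s sample the utime delta is usually
// smaller than systemClockTick*cpuCount, so cpuUtil truncates toward zero;
// floating-point math would be needed to keep fractional utilization.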
p := strings.Replace(selfCPUPath, "$pid", spid, 1)
p = strings.Replace(p, "$hostname", hostName, 1)
pbPath, err := xpath.ToGNMIPath(p)
if err != nil {
return err
}
stateOpt := gnmi.GNXIStateOptGenerator(pbPath, uint8(cpuUtil), gnmi.InternalUpdateState)
if err = s.InternalUpdate(stateOpt); err != nil {
log.Errorf("update state failed: %v", err)
return err
}
return nil
}
func updateAPInfo(s *gnmi.Server, hostName string, wLANINTFName string) error {
apInfoString, err := cmdRunner.GetAPStates()
if err != nil {
return err
} | for _, apInfo := range apInfos {
wlanName := apInfo[1]
channelStr := apInfo[3]
widthStr := apInfo[4]
txpowerStr := apInfo[5]
// Because radio info is not in IW command's output.
// The radio id is hard-coded here
phyIDStr := "1"
if strings.Compare(wlanName, wLANINTFName) != 0 {
continue
}
p := strings.Replace(channelPath, "$id", phyIDStr, 1)
p = strings.Replace(p, "$hostname", hostName, 1)
pbPath, err := xpath.ToGNMIPath(p)
if err != nil {
return fmt.Errorf("convert %v to GNMI path failed: %v", p, err)
}
channel, err := strconv.ParseUint(channelStr, 10, 8)
if err != nil {
log.Errorf("failed convert string to int: %v", err)
return err
}
stateOpt := gnmi.GNXIStateOptGenerator(pbPath, uint8(channel), gnmi.InternalUpdateState)
if err = s.InternalUpdate(stateOpt); err != nil {
return fmt.Errorf("update state failed: %v", err)
}
p = strings.Replace(widthPath, "$id", phyIDStr, 1)
p = strings.Replace(p, "$hostname", hostName, 1)
pbPath, err = xpath.ToGNMIPath(p)
if err != nil {
return fmt.Errorf("convert %v to GNMI path failed: %v", p, err)
}
width, err := strconv.ParseUint(widthStr, 10, 8)
if err != nil {
return fmt.Errorf("failed convert string to int: %v", err)
}
stateOpt = gnmi.GNXIStateOptGenerator(pbPath, uint8(width), gnmi.InternalUpdateState)
if err = s.InternalUpdate(stateOpt); err != nil {
log.Errorf("update state failed: %v", err)
return err
}
p = strings.Replace(txpowerPath, "$id", phyIDStr, 1)
p = strings.Replace(p, "$hostname", hostName, 1)
pbPath, err = xpath.ToGNMIPath(p)
if err != nil {
return fmt.Errorf("convert %v to GNMI path failed: %v", p, err)
}
txpower, err := strconv.ParseInt(txpowerStr, 10, 8)
if err != nil {
return fmt.Errorf("failed convert string to int: %v", err)
}
stateOpt = gnmi.GNXIStateOptGenerator(pbPath, uint8(txpower), gnmi.InternalUpdateState)
if err = s.InternalUpdate(stateOpt); err != nil {
return fmt.Errorf("update state failed: %v", err)
}
}
return nil
}
player.go
package game
import (
"go-agar/internal/util"
"math"
"sort"
"time"
)
type Player struct {
Id string
Name string
cells []*Cell
X float64
Y float64
targetX float64
targetY float64
Color string
TextColor string
lastSplit time.Time
fireFood chan *MassFood
split chan *Cell
MassTotal float64
VisibleFoods []*Food
VisibleMassFoods []*MassFood
VisibleCells []*Cell
VisibleViruses []*Virus
}
type personSlice []*Player
func (s personSlice) Len() int { return len(s) }
func (s personSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s personSlice) Less(i, j int) bool { return s[i].MassTotal > s[j].MassTotal }
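// SortPlayers orders players by total mass, descending.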
func SortPlayers(players []*Player) {
sort.Sort(personSlice(players))
}
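// NewPlayer creates a player with a single cell of the default mass, spawned at the center of the map.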
func NewPlayer(name string) *Player {
id := util.GenId()
if name == "" {
name = Config.AnonymousUserNamePrefix + id[:6]
}
mass := Config.DefaultPlayerMass
radius := util.MassToRadius(mass)
//x, y := util.RandomPosition(radius, Config.GameWidth, Config.GameHeight)
x, y := Config.GameWidth/2, Config.GameHeight/2
p := &Player{
Id: id,
Name: name,
X: x,
Y: y,
Color: util.Color(),
TextColor: "#000000",
fireFood: make(chan *MassFood, Config.CellMaxNum*10),
split: make(chan *Cell, Config.CellMaxNum),
MassTotal: mass,
}
c := p.addCell()
c.mass = mass
c.Radius = radius
return p
}
func (p *Player) IsDied() bool {
return len(p.cells) == 0 || int(p.MassTotal) == 0
}
func (p *Player) SplitAll() {
for _, v := range p.cells {
p.Split(v)
}
}
func (p *Player) Split(c *Cell) {
if len(p.cells) >= Config.CellMaxNum || c.mass < Config.DefaultPlayerMass*2 {
return
}
select {
case p.split <- c:
default:
}
}
func (p *Player) MoveTo(x, y float64) {
p.targetX = x
p.targetY = y
}
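// FireFood ejects a small mass from each cell toward the player's current target,
// never dropping a cell below the default player mass.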
func (p *Player) FireFood() {
for _, c := range p.cells {
fireMass := Config.FireFoodRate * c.mass
if Config.FireFoodMass > fireMass {
fireMass = Config.FireFoodMass
}
if c.mass-fireMass <= Config.DefaultPlayerMass {
return
}
c.mass -= fireMass
p.MassTotal -= fireMass
mf := NewMassFood(p)
mf.mass = fireMass
mf.Radius = util.MassToRadius(fireMass)
mf.X = c.X
mf.Y = c.Y
mf.targetX = p.X - c.X + p.targetX
mf.targetY = p.Y - c.Y + p.targetY
mf.speed = Config.FireFoodSpeed
p.fireFood <- mf
}
}
func (p *Player) FireFoods() <-chan *MassFood {
return p.fireFood
}
func (p *Player) UpdateVisibleFoods(foods []*Food) {
visibleFoods := make([]*Food, len(foods))
i := 0
for _, v := range foods {
if v.X > p.X-Config.ScreenWidth/2-20 &&
v.X < p.X+Config.ScreenWidth/2+20 &&
v.Y > p.Y-Config.ScreenHeight/2-20 &&
v.Y < p.Y+Config.ScreenHeight/2+20 {
visibleFoods[i] = v
i++
}
}
p.VisibleFoods = visibleFoods[:i]
}
func (p *Player) UpdateVisibleMassFoods(massFoods []*MassFood) {
visibleMassFoods := make([]*MassFood, len(massFoods))
i := 0
for _, v := range massFoods {
if v.X > p.X-Config.ScreenWidth/2-v.Radius &&
v.X < p.X+Config.ScreenWidth/2+v.Radius &&
v.Y > p.Y-Config.ScreenHeight/2-v.Radius &&
v.Y < p.Y+Config.ScreenHeight/2+v.Radius {
visibleMassFoods[i] = v
i++
}
}
p.VisibleMassFoods = visibleMassFoods[:i]
}
func (p *Player) UpdateVisibleViruses(viruses []*Virus) {
visibleViruses := make([]*Virus, len(viruses))
i := 0
for _, v := range viruses {
if v.X > p.X-Config.ScreenWidth/2-20-v.Radius &&
v.X < p.X+Config.ScreenWidth/2+20+v.Radius &&
v.Y > p.Y-Config.ScreenHeight/2-20-v.Radius &&
v.Y < p.Y+Config.ScreenHeight/2+20+v.Radius {
visibleViruses[i] = v
i++
}
}
p.VisibleViruses = visibleViruses[:i]
}
func (p *Player) UpdateVisibleCells(players []*Player) {
// The maximum number of cells does not exceed the maximum number of divisions performed by all users
visibleCells := make([]*Cell, len(players)*Config.CellMaxNum)
i := 0
for _, p2 := range players {
for _, v := range p2.cells {
if v.X > p.X-Config.ScreenWidth/2-20-v.Radius &&
v.X < p.X+Config.ScreenWidth/2+20+v.Radius &&
v.Y > p.Y-Config.ScreenHeight/2-20-v.Radius &&
v.Y < p.Y+Config.ScreenHeight/2+20+v.Radius {
visibleCells[i] = v
i++
}
}
}
p.VisibleCells = visibleCells[:i]
}
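// Update applies pending splits, moves and merges cells, rebounds them off the
// map border, and recenters the player on the average cell position.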
func (p *Player) Update() {
p.updateSplit()
x, y := float64(0), float64(0)
for i := 0; i < len(p.cells); i++ {
c := p.cells[i]
if c.speed == 0 {
c.speed = Config.CellDefaultSpeed
}
p.moveCell(c)
p.mergeCell(i)
if len(p.cells) > i {
p.borderReboundCell(c)
x += c.X
y += c.Y
}
}
clen := float64(len(p.cells))
p.X = x / clen
p.Y = y / clen
}
func (p *Player) updateSplit() {
for {
select {
case c := <-p.split:
p.splitCell(c)
default:
return
}
}
}
func (p *Player) moveCell(c *Cell) {
targetX := p.X - c.X + p.targetX
targetY := p.Y - c.Y + p.targetY
dist := math.Sqrt(math.Pow(targetY, 2) + math.Pow(targetX, 2))
deg := math.Atan2(targetY, targetX)
//slow easy...
slowDown := float64(1)
if c.speed <= Config.CellDefaultSpeed {
slowDown = util.Log(c.mass, Config.SlowBase) - Config.InitMassLog + 1
}
deltaY := c.speed * math.Sin(deg) / slowDown
deltaX := c.speed * math.Cos(deg) / slowDown
if c.speed > Config.CellDefaultSpeed {
c.speed -= 0.5
}
//why 50 ?
if dist < (50 + c.Radius) {
deltaY *= dist / (50 + c.Radius)
deltaX *= dist / (50 + c.Radius)
}
c.Y += deltaY
c.X += deltaX
}
func (p *Player) mergeCell(i int) {
c := p.cells[i]
//merge or separate
mergePermit := p.lastSplit.Add(Config.MergeInterval).Before(time.Now())
for j := i + 1; j < len(p.cells); j++ {
c2 := p.cells[j]
distance := util.GetDistance(c.X, c.Y, 0, c2.X, c2.Y, 0)
radiusTotal := c.Radius + c2.Radius
if distance >= radiusTotal {
continue
}
if mergePermit && radiusTotal > distance * Config.CellMergeDistanceRate {
c.mass += c2.mass
c.Radius = util.MassToRadius(c.mass)
p.cells = append(p.cells[:j], p.cells[j+1:]...)
continue
}
if c.X < c2.X {
c.X--
} else if c.X > c2.X {
c.X++
}
if c.Y < c2.Y {
c.Y--
} else if c.Y > c2.Y {
c.Y++
}
}
}
func (p *Player) borderReboundCell(c *Cell) {
//why 3 ? it seems to overlap the border
borderCalc := c.Radius / 3
//border rebound
if c.X > Config.GameWidth-borderCalc {
c.X = Config.GameWidth - borderCalc
}
if c.X < borderCalc {
c.X = borderCalc
}
if c.Y > Config.GameHeight-borderCalc {
c.Y = Config.GameHeight - borderCalc
}
if c.Y < borderCalc {
c.Y = borderCalc
}
}
func (p *Player) splitCell(c *Cell) {
if len(p.cells) >= Config.CellMaxNum || c.mass < Config.DefaultPlayerMass*2 {
return
}
for _, v := range p.cells {
if v == c {
c.mass = c.mass / 2
c.Radius = util.MassToRadius(c.mass)
nc := p.addCell()
nc.X = c.X
nc.Y = c.Y
nc.mass = c.mass
nc.Radius = c.Radius
nc.speed = Config.SplitSpeed
p.lastSplit = time.Now()
return
}
}
}
func (p *Player) addCell() *Cell {
c := NewCell(p)
p.cells = append(p.cells, c)
return c
}
mod.rs
#![allow(non_snake_case, non_camel_case_types, non_upper_case_globals, clashing_extern_declarations, clippy::all)]
#[link(name = "windows")]
extern "system" {
#[doc = "*Required features: 'Win32_Storage_OfflineFiles', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub fn OfflineFilesEnable(benable: super::super::Foundation::BOOL, pbrebootrequired: *mut super::super::Foundation::BOOL) -> u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub fn OfflineFilesQueryStatus(pbactive: *mut super::super::Foundation::BOOL, pbenabled: *mut super::super::Foundation::BOOL) -> u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles', 'Win32_Foundation'*"]
#[cfg(feature = "Win32_Foundation")]
pub fn OfflineFilesQueryStatusEx(pbactive: *mut super::super::Foundation::BOOL, pbenabled: *mut super::super::Foundation::BOOL, pbavailable: *mut super::super::Foundation::BOOL) -> u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub fn OfflineFilesStart() -> u32;
}
pub type IEnumOfflineFilesItems = *mut ::core::ffi::c_void;
pub type IEnumOfflineFilesSettings = *mut ::core::ffi::c_void;
pub type IOfflineFilesCache = *mut ::core::ffi::c_void;
pub type IOfflineFilesCache2 = *mut ::core::ffi::c_void;
pub type IOfflineFilesChangeInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesConnectionInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesDirectoryItem = *mut ::core::ffi::c_void;
pub type IOfflineFilesDirtyInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesErrorInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesEvents = *mut ::core::ffi::c_void;
pub type IOfflineFilesEvents2 = *mut ::core::ffi::c_void;
pub type IOfflineFilesEvents3 = *mut ::core::ffi::c_void;
pub type IOfflineFilesEvents4 = *mut ::core::ffi::c_void;
pub type IOfflineFilesEventsFilter = *mut ::core::ffi::c_void;
pub type IOfflineFilesFileItem = *mut ::core::ffi::c_void;
pub type IOfflineFilesFileSysInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesGhostInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesItem = *mut ::core::ffi::c_void;
pub type IOfflineFilesItemContainer = *mut ::core::ffi::c_void;
pub type IOfflineFilesItemFilter = *mut ::core::ffi::c_void;
pub type IOfflineFilesPinInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesPinInfo2 = *mut ::core::ffi::c_void;
pub type IOfflineFilesProgress = *mut ::core::ffi::c_void;
pub type IOfflineFilesServerItem = *mut ::core::ffi::c_void;
pub type IOfflineFilesSetting = *mut ::core::ffi::c_void;
pub type IOfflineFilesShareInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesShareItem = *mut ::core::ffi::c_void;
pub type IOfflineFilesSimpleProgress = *mut ::core::ffi::c_void;
pub type IOfflineFilesSuspend = *mut ::core::ffi::c_void;
pub type IOfflineFilesSuspendInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesSyncConflictHandler = *mut ::core::ffi::c_void;
pub type IOfflineFilesSyncErrorInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesSyncErrorItemInfo = *mut ::core::ffi::c_void;
pub type IOfflineFilesSyncProgress = *mut ::core::ffi::c_void;
pub type IOfflineFilesTransparentCacheInfo = *mut ::core::ffi::c_void;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_CACHING_MODE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CACHING_MODE_NONE: OFFLINEFILES_CACHING_MODE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CACHING_MODE_NOCACHING: OFFLINEFILES_CACHING_MODE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CACHING_MODE_MANUAL: OFFLINEFILES_CACHING_MODE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CACHING_MODE_AUTO_DOC: OFFLINEFILES_CACHING_MODE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CACHING_MODE_AUTO_PROGANDDOC: OFFLINEFILES_CACHING_MODE = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_LOCAL_ATTRIBUTES: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_LOCAL_SIZE: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_LOCAL_TIME: u32 = 4u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_NONE: u32 = 0u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_REMOTE_ATTRIBUTES: u32 = 16u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_REMOTE_SIZE: u32 = 8u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CHANGES_REMOTE_TIME: u32 = 32u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_COMPARE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_COMPARE_EQ: OFFLINEFILES_COMPARE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_COMPARE_NEQ: OFFLINEFILES_COMPARE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_COMPARE_LT: OFFLINEFILES_COMPARE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_COMPARE_GT: OFFLINEFILES_COMPARE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_COMPARE_LTE: OFFLINEFILES_COMPARE = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_COMPARE_GTE: OFFLINEFILES_COMPARE = 5i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_CONNECT_STATE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CONNECT_STATE_UNKNOWN: OFFLINEFILES_CONNECT_STATE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CONNECT_STATE_OFFLINE: OFFLINEFILES_CONNECT_STATE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CONNECT_STATE_ONLINE: OFFLINEFILES_CONNECT_STATE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CONNECT_STATE_TRANSPARENTLY_CACHED: OFFLINEFILES_CONNECT_STATE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_CONNECT_STATE_PARTLY_TRANSPARENTLY_CACHED: OFFLINEFILES_CONNECT_STATE = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_DELETE_FLAG_ADMIN: u32 = 2147483648u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_DELETE_FLAG_DELMODIFIED: u32 = 4u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_DELETE_FLAG_NOAUTOCACHED: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_DELETE_FLAG_NOPINNED: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENCRYPTION_CONTROL_FLAG_ASYNCPROGRESS: u32 = 1024u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENCRYPTION_CONTROL_FLAG_BACKGROUND: u32 = 65536u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENCRYPTION_CONTROL_FLAG_CONSOLE: u32 = 4096u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENCRYPTION_CONTROL_FLAG_INTERACTIVE: u32 = 2048u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENCRYPTION_CONTROL_FLAG_LOWPRIORITY: u32 = 512u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENUM_FLAT: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ENUM_FLAT_FILESONLY: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_EVENTS = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_CACHEMOVED: OFFLINEFILES_EVENTS = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_CACHEISFULL: OFFLINEFILES_EVENTS = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_CACHEISCORRUPTED: OFFLINEFILES_EVENTS = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ENABLED: OFFLINEFILES_EVENTS = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ENCRYPTIONCHANGED: OFFLINEFILES_EVENTS = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SYNCBEGIN: OFFLINEFILES_EVENTS = 5i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SYNCFILERESULT: OFFLINEFILES_EVENTS = 6i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SYNCCONFLICTRECADDED: OFFLINEFILES_EVENTS = 7i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SYNCCONFLICTRECUPDATED: OFFLINEFILES_EVENTS = 8i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SYNCCONFLICTRECREMOVED: OFFLINEFILES_EVENTS = 9i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SYNCEND: OFFLINEFILES_EVENTS = 10i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_BACKGROUNDSYNCBEGIN: OFFLINEFILES_EVENTS = 11i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_BACKGROUNDSYNCEND: OFFLINEFILES_EVENTS = 12i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_NETTRANSPORTARRIVED: OFFLINEFILES_EVENTS = 13i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_NONETTRANSPORTS: OFFLINEFILES_EVENTS = 14i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMDISCONNECTED: OFFLINEFILES_EVENTS = 15i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMRECONNECTED: OFFLINEFILES_EVENTS = 16i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMAVAILABLEOFFLINE: OFFLINEFILES_EVENTS = 17i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMNOTAVAILABLEOFFLINE: OFFLINEFILES_EVENTS = 18i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMPINNED: OFFLINEFILES_EVENTS = 19i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMNOTPINNED: OFFLINEFILES_EVENTS = 20i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMMODIFIED: OFFLINEFILES_EVENTS = 21i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMADDEDTOCACHE: OFFLINEFILES_EVENTS = 22i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMDELETEDFROMCACHE: OFFLINEFILES_EVENTS = 23i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMRENAMED: OFFLINEFILES_EVENTS = 24i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_DATALOST: OFFLINEFILES_EVENTS = 25i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_PING: OFFLINEFILES_EVENTS = 26i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMRECONNECTBEGIN: OFFLINEFILES_EVENTS = 27i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_ITEMRECONNECTEND: OFFLINEFILES_EVENTS = 28i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_CACHEEVICTBEGIN: OFFLINEFILES_EVENTS = 29i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_CACHEEVICTEND: OFFLINEFILES_EVENTS = 30i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_POLICYCHANGEDETECTED: OFFLINEFILES_EVENTS = 31i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_PREFERENCECHANGEDETECTED: OFFLINEFILES_EVENTS = 32i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_SETTINGSCHANGESAPPLIED: OFFLINEFILES_EVENTS = 33i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_TRANSPARENTCACHEITEMNOTIFY: OFFLINEFILES_EVENTS = 34i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_PREFETCHFILEBEGIN: OFFLINEFILES_EVENTS = 35i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_PREFETCHFILEEND: OFFLINEFILES_EVENTS = 36i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_PREFETCHCLOSEHANDLEBEGIN: OFFLINEFILES_EVENTS = 37i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_EVENT_PREFETCHCLOSEHANDLEEND: OFFLINEFILES_EVENTS = 38i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_NUM_EVENTS: OFFLINEFILES_EVENTS = 39i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_ITEM_COPY = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_COPY_LOCAL: OFFLINEFILES_ITEM_COPY = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_COPY_REMOTE: OFFLINEFILES_ITEM_COPY = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_COPY_ORIGINAL: OFFLINEFILES_ITEM_COPY = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_CREATED: u32 = 8u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_DELETED: u32 = 16u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_DIRECTORY: u32 = 256u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_DIRTY: u32 = 32u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_FILE: u32 = 128u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_GHOST: u32 = 8192u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_GUEST_ANYACCESS: u32 = 33554432u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_GUEST_READ: u32 = 16777216u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_GUEST_WRITE: u32 = 8388608u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_MODIFIED: u32 = 4u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_MODIFIED_ATTRIBUTES: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_MODIFIED_DATA: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_OFFLINE: u32 = 32768u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_ONLINE: u32 = 65536u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_OTHER_ANYACCESS: u32 = 4194304u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_OTHER_READ: u32 = 2097152u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_OTHER_WRITE: u32 = 1048576u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_PINNED: u32 = 4096u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_PINNED_COMPUTER: u32 = 2048u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_PINNED_OTHERS: u32 = 1024u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_PINNED_USER: u32 = 512u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_SPARSE: u32 = 64u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_SUSPENDED: u32 = 16384u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_USER_ANYACCESS: u32 = 524288u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_USER_READ: u32 = 262144u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_FILTER_FLAG_USER_WRITE: u32 = 131072u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_ADMIN: u32 = 2147483648u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_ATTEMPT_TRANSITIONONLINE: u32 = 32u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_CONNECTIONSTATE: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_INCLUDETRANSPARENTCACHE: u32 = 16u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_LOCALDIRTYBYTECOUNT: u32 = 4u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_REMOTEDIRTYBYTECOUNT: u32 = 8u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_QUERY_REMOTEINFO: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_ITEM_TIME = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TIME_CREATION: OFFLINEFILES_ITEM_TIME = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TIME_LASTACCESS: OFFLINEFILES_ITEM_TIME = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TIME_LASTWRITE: OFFLINEFILES_ITEM_TIME = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_ITEM_TYPE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TYPE_FILE: OFFLINEFILES_ITEM_TYPE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TYPE_DIRECTORY: OFFLINEFILES_ITEM_TYPE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TYPE_SHARE: OFFLINEFILES_ITEM_TYPE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_ITEM_TYPE_SERVER: OFFLINEFILES_ITEM_TYPE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_OFFLINE_REASON = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_UNKNOWN: OFFLINEFILES_OFFLINE_REASON = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_NOT_APPLICABLE: OFFLINEFILES_OFFLINE_REASON = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_CONNECTION_FORCED: OFFLINEFILES_OFFLINE_REASON = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_CONNECTION_SLOW: OFFLINEFILES_OFFLINE_REASON = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_CONNECTION_ERROR: OFFLINEFILES_OFFLINE_REASON = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_ITEM_VERSION_CONFLICT: OFFLINEFILES_OFFLINE_REASON = 5i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OFFLINE_REASON_ITEM_SUSPENDED: OFFLINEFILES_OFFLINE_REASON = 6i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_OP_RESPONSE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OP_CONTINUE: OFFLINEFILES_OP_RESPONSE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OP_RETRY: OFFLINEFILES_OP_RESPONSE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_OP_ABORT: OFFLINEFILES_OP_RESPONSE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_PATHFILTER_MATCH = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PATHFILTER_SELF: OFFLINEFILES_PATHFILTER_MATCH = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PATHFILTER_CHILD: OFFLINEFILES_PATHFILTER_MATCH = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PATHFILTER_DESCENDENT: OFFLINEFILES_PATHFILTER_MATCH = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PATHFILTER_SELFORCHILD: OFFLINEFILES_PATHFILTER_MATCH = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PATHFILTER_SELFORDESCENDENT: OFFLINEFILES_PATHFILTER_MATCH = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PINLINKTARGETS_ALWAYS: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PINLINKTARGETS_EXPLICIT: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PINLINKTARGETS_NEVER: u32 = 0u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_ASYNCPROGRESS: u32 = 1024u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_BACKGROUND: u32 = 65536u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_CONSOLE: u32 = 4096u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_FILL: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_FORALL: u32 = 128u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_FORREDIR: u32 = 256u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_FORUSER: u32 = 32u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_FORUSER_POLICY: u32 = 64u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_INTERACTIVE: u32 = 2048u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_LOWPRIORITY: u32 = 512u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_PIN_CONTROL_FLAG_PINLINKTARGETS: u32 = 16u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_PinLinkTargets: &'static str = "LinkTargetCaching";
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_SCOPE_COMPUTER: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_SCOPE_USER: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_SETTING_VALUE_TYPE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_VALUE_UI4: OFFLINEFILES_SETTING_VALUE_TYPE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_VALUE_BSTR: OFFLINEFILES_SETTING_VALUE_TYPE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_VALUE_BSTR_DBLNULTERM: OFFLINEFILES_SETTING_VALUE_TYPE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_VALUE_2DIM_ARRAY_BSTR_UI4: OFFLINEFILES_SETTING_VALUE_TYPE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SETTING_VALUE_2DIM_ARRAY_BSTR_BSTR: OFFLINEFILES_SETTING_VALUE_TYPE = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_SYNC_CONFLICT_RESOLVE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_NONE: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_KEEPLOCAL: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_KEEPREMOTE: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_KEEPALLCHANGES: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_KEEPLATEST: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_LOG: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 5i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_SKIP: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 6i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_ABORT: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 7i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONFLICT_RESOLVE_NUMCODES: OFFLINEFILES_SYNC_CONFLICT_RESOLVE = 8i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_CR_DEFAULT: u32 = 0u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_CR_KEEPLATEST: u32 = 805306368u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_CR_KEEPLOCAL: u32 = 268435456u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_CR_KEEPREMOTE: u32 = 536870912u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_CR_MASK: u32 = 4026531840u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_ASYNCPROGRESS: u32 = 1024u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_BACKGROUND: u32 = 65536u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_CONSOLE: u32 = 4096u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_FILLSPARSE: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_INTERACTIVE: u32 = 2048u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_LOWPRIORITY: u32 = 512u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_NONEWFILESOUT: u32 = 131072u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_PINFORALL: u32 = 128u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_PINFORREDIR: u32 = 256u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_PINFORUSER: u32 = 32u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_PINFORUSER_POLICY: u32 = 64u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_PINLINKTARGETS: u32 = 16u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_PINNEWFILES: u32 = 8u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_SKIPSUSPENDEDDIRS: u32 = 8192u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_SYNCIN: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_CONTROL_FLAG_SYNCOUT: u32 = 4u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_ITEM_CHANGE_ATTRIBUTES: u32 = 8u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_ITEM_CHANGE_CHANGETIME: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_ITEM_CHANGE_FILESIZE: u32 = 4u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_ITEM_CHANGE_NONE: u32 = 0u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_ITEM_CHANGE_WRITETIME: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_SYNC_OPERATION = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_CREATE_COPY_ON_SERVER: OFFLINEFILES_SYNC_OPERATION = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_CREATE_COPY_ON_CLIENT: OFFLINEFILES_SYNC_OPERATION = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_SYNC_TO_SERVER: OFFLINEFILES_SYNC_OPERATION = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_SYNC_TO_CLIENT: OFFLINEFILES_SYNC_OPERATION = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_DELETE_SERVER_COPY: OFFLINEFILES_SYNC_OPERATION = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_DELETE_CLIENT_COPY: OFFLINEFILES_SYNC_OPERATION = 5i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_PIN: OFFLINEFILES_SYNC_OPERATION = 6i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_OPERATION_PREPARE: OFFLINEFILES_SYNC_OPERATION = 7i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub type OFFLINEFILES_SYNC_STATE = i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_Stable: OFFLINEFILES_SYNC_STATE = 0i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 1i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileOnClient_NoServerCopy: OFFLINEFILES_SYNC_STATE = 2i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirOnClient_FileOnServer: OFFLINEFILES_SYNC_STATE = 3i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirOnClient_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 4i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirOnClient_NoServerCopy: OFFLINEFILES_SYNC_STATE = 5i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileCreatedOnClient_NoServerCopy: OFFLINEFILES_SYNC_STATE = 6i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileCreatedOnClient_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 7i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileCreatedOnClient_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 8i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileCreatedOnClient_FileOnServer: OFFLINEFILES_SYNC_STATE = 9i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileCreatedOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 10i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileCreatedOnClient_DeletedOnServer: OFFLINEFILES_SYNC_STATE = 11i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileChangedOnClient_ChangedOnServer: OFFLINEFILES_SYNC_STATE = 12i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileChangedOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 13i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileChangedOnClient_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 14i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileChangedOnClient_DeletedOnServer: OFFLINEFILES_SYNC_STATE = 15i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileSparseOnClient_ChangedOnServer: OFFLINEFILES_SYNC_STATE = 16i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileSparseOnClient_DeletedOnServer: OFFLINEFILES_SYNC_STATE = 17i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileSparseOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 18i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileSparseOnClient_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 19i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirCreatedOnClient_NoServerCopy: OFFLINEFILES_SYNC_STATE = 20i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirCreatedOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 21i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirCreatedOnClient_FileOnServer: OFFLINEFILES_SYNC_STATE = 22i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirCreatedOnClient_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 23i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirCreatedOnClient_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 24i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirCreatedOnClient_DeletedOnServer: OFFLINEFILES_SYNC_STATE = 25i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirChangedOnClient_FileOnServer: OFFLINEFILES_SYNC_STATE = 26i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirChangedOnClient_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 27i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirChangedOnClient_ChangedOnServer: OFFLINEFILES_SYNC_STATE = 28i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirChangedOnClient_DeletedOnServer: OFFLINEFILES_SYNC_STATE = 29i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_NoClientCopy_FileOnServer: OFFLINEFILES_SYNC_STATE = 30i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_NoClientCopy_DirOnServer: OFFLINEFILES_SYNC_STATE = 31i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_NoClientCopy_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 32i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_NoClientCopy_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 33i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DeletedOnClient_FileOnServer: OFFLINEFILES_SYNC_STATE = 34i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DeletedOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 35i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DeletedOnClient_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 36i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DeletedOnClient_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 37i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileSparseOnClient: OFFLINEFILES_SYNC_STATE = 38i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileChangedOnClient: OFFLINEFILES_SYNC_STATE = 39i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileRenamedOnClient: OFFLINEFILES_SYNC_STATE = 40i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirSparseOnClient: OFFLINEFILES_SYNC_STATE = 41i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirChangedOnClient: OFFLINEFILES_SYNC_STATE = 42i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirRenamedOnClient: OFFLINEFILES_SYNC_STATE = 43i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 44i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileRenamedOnServer: OFFLINEFILES_SYNC_STATE = 45i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileDeletedOnServer: OFFLINEFILES_SYNC_STATE = 46i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 47i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirRenamedOnServer: OFFLINEFILES_SYNC_STATE = 48i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_DirDeletedOnServer: OFFLINEFILES_SYNC_STATE = 49i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileReplacedAndDeletedOnClient_FileOnServer: OFFLINEFILES_SYNC_STATE = 50i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileReplacedAndDeletedOnClient_FileChangedOnServer: OFFLINEFILES_SYNC_STATE = 51i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileReplacedAndDeletedOnClient_DirOnServer: OFFLINEFILES_SYNC_STATE = 52i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_FileReplacedAndDeletedOnClient_DirChangedOnServer: OFFLINEFILES_SYNC_STATE = 53i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_NUMSTATES: OFFLINEFILES_SYNC_STATE = 54i32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_LOCAL_KNOWN: u32 = 1u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_SYNC_STATE_REMOTE_KNOWN: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_TRANSITION_FLAG_CONSOLE: u32 = 2u32;
#[doc = "*Required features: 'Win32_Storage_OfflineFiles'*"]
pub const OFFLINEFILES_TRANSITION_FLAG_INTERACTIVE: u32 = 1u32;
pub const OfflineFilesCache: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 1220984444, data2: 14449, data3: 17356, data4: [180, 111, 20, 73, 161, 187, 47, 243] };
pub const OfflineFilesSetting: ::windows_sys::core::GUID = ::windows_sys::core::GUID { data1: 4248197609, data2: 43296, data3: 16675, data4: [173, 100, 127, 199, 108, 122, 172, 223] };
Permuting Two Arrays.py
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'twoArrays' function below.
#
# The function is expected to return a STRING.
# The function accepts following parameters:
# 1. INTEGER k
# 2. INTEGER_ARRAY A
# 3. INTEGER_ARRAY B
#
def twoArrays(k, A, B):
# Write your code here
A = sorted(A)
B = sorted(B, reverse=True)
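# Greedy check: pair the smallest A with the largest B; if any pairing
# satisfies A[i] + B[i] >= k for all i, this one does.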
for x, y in zip(A, B):
if x+y < k:
return "NO"
return "YES"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
for q_itr in range(q):
first_multiple_input = input().rstrip().split()
n = int(first_multiple_input[0])
k = int(first_multiple_input[1])
A = list(map(int, input().rstrip().split()))
B = list(map(int, input().rstrip().split()))
result = twoArrays(k, A, B)
fptr.write(result + '\n')
fptr.close()
resource_info.go
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"fmt"
"math"
"k8s.io/api/core/v1"
)
type Resource struct {
MilliCPU float64
Memory float64
}
func EmptyResource() *Resource {
return &Resource{
MilliCPU: 0,
Memory: 0,
}
}
func (r *Resource) Clone() *Resource {
clone := &Resource{
MilliCPU: r.MilliCPU,
Memory: r.Memory,
}
return clone
}
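// Quantities below these thresholds are treated as zero when testing emptiness.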
var minMilliCPU float64 = 10
var minMemory float64 = 10 * 1024 * 1024
func NewResource(rl v1.ResourceList) *Resource {
r := EmptyResource()
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
r.MilliCPU += float64(rQuant.MilliValue())
case v1.ResourceMemory:
r.Memory += float64(rQuant.Value())
}
}
return r
}
func (r *Resource) IsEmpty() bool {
return r.MilliCPU < minMilliCPU && r.Memory < minMemory
}
func (r *Resource) IsZero(rn v1.ResourceName) bool {
switch rn {
case v1.ResourceCPU:
return r.MilliCPU < minMilliCPU
case v1.ResourceMemory:
return r.Memory < minMemory
default:
panic("unknown resource")
}
}
func (r *Resource) Add(rr *Resource) *Resource {
r.MilliCPU += rr.MilliCPU
r.Memory += rr.Memory
return r
}
// Sub subtracts rr from r in place; it panics unless r holds at least rr of every resource.
func (r *Resource) Sub(rr *Resource) *Resource {
if rr.LessEqual(r) {
r.MilliCPU -= rr.MilliCPU
r.Memory -= rr.Memory
return r
}
panic("Resource is not sufficient to do operation: Sub()")
}
func (r *Resource) Less(rr *Resource) bool {
return r.MilliCPU < rr.MilliCPU && r.Memory < rr.Memory
}
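// LessEqual tolerates small floating-point error: 0.01 milli-CPU and 1 byte of memory.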
func (r *Resource) LessEqual(rr *Resource) bool {
return (r.MilliCPU < rr.MilliCPU || math.Abs(rr.MilliCPU-r.MilliCPU) < 0.01) &&
(r.Memory < rr.Memory || math.Abs(rr.Memory-r.Memory) < 1)
}
func (r *Resource) String() string {
return fmt.Sprintf("cpu %0.2f, memory %0.2f",
r.MilliCPU, r.Memory)
}
func (r *Resource) Get(rn v1.ResourceName) float64 {
switch rn {
case v1.ResourceCPU:
return r.MilliCPU
case v1.ResourceMemory:
return r.Memory
default:
panic("not support resource.")
}
}
func ResourceNames() []v1.ResourceName {
return []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory}
}
update_ethz.py
#! /usr/bin/env python
from spider import *
sys.path.append("..")
from utils import Utils
class EthzSpider(Spider):
def __init__(self):
Spider.__init__(self)
self.school = "ethz"
self.semkezDict = {}
self.deptDict = {}
self.utils = Utils()
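# processData fetches one department's course list for a semester and rewrites
# the subject's data file when the entry count changes.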
def processData(self, semkez, deptId, subject):
print "processing " + semkez + " " + deptId + " " + subject
r = requests.get('http://www.vvz.ethz.ch/Vorlesungsverzeichnis/sucheLehrangebot.do?wahlinfo=&seite=0&katalogdaten=&lerneinheitstitel=&studiengangTyp=&strukturAus=on&rufname=&bereichAbschnittId=0&lang=en&ansicht=3&lehrsprache=&studiengangAbschnittId=0&semkez=' + semkez + '&famname=&deptId=' + deptId + '&unterbereichAbschnittId=0&lerneinheitscode=')
soup = BeautifulSoup(r.text)
file_name = self.get_file_name(subject.lower(), self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
for a in soup.find_all('a'):
if a.attrs.has_key('href') and a['href'].find('lerneinheitPre.do') != -1:
title = self.utils.removeDoubleSpace(a.text.strip().replace('\n','').replace('\t', ''))
if len(title) > 2:
print title
self.count += 1
self.write_db(f, self.school + "-" + str(deptId) + "-" + str(self.count), title, 'http://www.vvz.ethz.ch' + a['href'])
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def doWork(self):
r = requests.get('http://www.vvz.ethz.ch/Vorlesungsverzeichnis/sucheLehrangebotPre.do?lang=en')
soup = BeautifulSoup(r.text)
for select in soup.find_all('select', class_='w50'):
if select['name'] == "semkez":
soup1 = BeautifulSoup(select.prettify())
for option in soup1.find_all('option'):
if option.text.strip() != '':
self.semkezDict[option['value']] = option.text.strip()
if select['name'] == "deptId":
soup1 = BeautifulSoup(select.prettify())
for option in soup1.find_all('option'):
if option.text.strip() != '':
self.deptDict[option['value']] = option.text.strip()
for k, v in self.deptDict.items():
if self.need_update_subject(v) == False:
continue
year = time.strftime("%Y")
for semkez in self.semkezDict.keys():
if semkez[0 : 4] == year:
self.processData(semkez, k, v)
start = EthzSpider()
start.doWork()
table.rs
pub fn standard() -> Vec<u8> {
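// A-Z, a-z, 0-9, '+', '/': the 64 symbols of the standard Base64 alphabet (RFC 4648).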
(b'A'..=b'Z')
.chain(b'a'..=b'z')
.chain(b'0'..=b'9')
.chain(vec![b'+', b'/'].into_iter())
.collect()
}
unittest_pyreverse_inspector.py
# Copyright (c) 2003-2015 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
Unit tests for the visitors.diadefs module.
"""
import os
import unittest
import astroid
from astroid import nodes
from astroid import bases
from astroid import manager
from astroid import test_utils
from pylint.pyreverse import inspector
from unittest_pyreverse_writer import get_project
MANAGER = manager.AstroidManager()
def astroid_wrapper(func, modname):
return func(modname)
class LinkerTest(unittest.TestCase):
def setUp(self):
super(LinkerTest, self).setUp()
self.project = get_project('data', 'data')
self.linker = inspector.Linker(self.project)
self.linker.visit(self.project)
def test_class_implements(self):
klass = self.project.get_module('data.clientmodule_test')['Ancestor']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 1)
self.assertTrue(isinstance(klass.implements[0], nodes.ClassDef))
self.assertEqual(klass.implements[0].name, "Interface")
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'implements'))
self.assertEqual(len(klass.implements), 0)
def test_locals_assignment_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'locals_type'))
type_dict = klass.locals_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['TYPE', 'top'])
self.assertEqual(len(type_dict['TYPE']), 1)
self.assertEqual(type_dict['TYPE'][0].value, 'final class')
self.assertEqual(len(type_dict['top']), 1)
self.assertEqual(type_dict['top'][0].value, 'class')
def test_instance_attrs_resolution(self):
klass = self.project.get_module('data.clientmodule_test')['Specialization']
self.assertTrue(hasattr(klass, 'instance_attrs_type'))
type_dict = klass.instance_attrs_type
self.assertEqual(len(type_dict), 2)
keys = sorted(type_dict.keys())
self.assertEqual(keys, ['_id', 'relation'])
self.assertTrue(isinstance(type_dict['relation'][0], bases.Instance),
type_dict['relation'])
self.assertEqual(type_dict['relation'][0].name, 'DoNothing')
self.assertIs(type_dict['_id'][0], astroid.YES)
def test_concat_interfaces(self):
cls = test_utils.extract_node('''
class IMachin: pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
class InterfaceCanNowBeFound: #@
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
''')
interfaces = inspector.interfaces(cls)
self.assertEqual([i.name for i in interfaces], ['IMachin'])
def test_interfaces(self):
module = astroid.parse('''
class Interface(object): pass
class MyIFace(Interface): pass
class AnotherIFace(Interface): pass
class Concrete0(object):
__implements__ = MyIFace
class Concrete1:
__implements__ = (MyIFace, AnotherIFace)
class Concrete2:
__implements__ = (MyIFace, AnotherIFace)
class Concrete23(Concrete1): pass
''')
for klass, interfaces in (('Concrete0', ['MyIFace']),
('Concrete1', ['MyIFace', 'AnotherIFace']),
('Concrete2', ['MyIFace', 'AnotherIFace']),
('Concrete23', ['MyIFace', 'AnotherIFace'])):
klass = module[klass]
self.assertEqual([i.name for i in inspector.interfaces(klass)],
interfaces)
def test_from_directory(self):
expected = os.path.join('pylint', 'test', 'data', '__init__.py')
self.assertEqual(self.project.name, 'data')
self.assertTrue(self.project.path.endswith(expected), self.project.path)
def test_project_node(self):
expected = [
'data', 'data.clientmodule_test',
'data.suppliermodule_test',
]
self.assertListEqual(sorted(self.project.keys()), expected)
if __name__ == '__main__':
unittest.main()
| super(LinkerTest, self).setUp()
self.project = get_project('data', 'data')
self.linker = inspector.Linker(self.project)
self.linker.visit(self.project) |
device.py | import typing
import netmiko
import napalm_digineo_procurve.queries.interfaces
import napalm_digineo_procurve.queries.lldp_neighbors
import napalm_digineo_procurve.queries.device_info
import napalm_digineo_procurve.queries.system_info
import napalm_digineo_procurve.queries.uptime
def get_uptime(device: netmiko.BaseConnection) -> float:
return napalm_digineo_procurve.queries.uptime.query(device)
def | (
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.system_info.SystemInformation:
return napalm_digineo_procurve.queries.system_info.query(device)
def get_device_manufacturer_info(
device: netmiko.BaseConnection
) -> napalm_digineo_procurve.queries.device_info.DeviceInformation:
return napalm_digineo_procurve.queries.device_info.query(device)
def get_interfaces(
device: netmiko.BaseConnection
) -> typing.Sequence[napalm_digineo_procurve.queries.interfaces.Interface]:
return napalm_digineo_procurve.queries.interfaces.query(device)
def get_lldp_neighbors(
device: netmiko.BaseConnection
) -> typing.List[typing.Mapping[str, str]]:
return napalm_digineo_procurve.queries.lldp_neighbors.query(device)
| get_system_information |
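A minimal usage sketch for the helpers above; the device_type, host and credentials are placeholder assumptions:

conn = netmiko.ConnectHandler(
    device_type="hp_procurve",  # ProCurve driver name shipped with netmiko
    host="192.0.2.1",
    username="admin",
    password="secret",
)
print(get_uptime(conn))          # float from the uptime query
print(get_lldp_neighbors(conn))  # list of neighbor mappings
conn.disconnect()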
mod.rs | pub mod function_definitions;
pub mod struct_definitions; | pub mod method_definitions; |
pub mod enum_definitions;
|
result.rs | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR MIT
//
// Check that the Invariant implementation for Result respect underlying types invariant.
extern crate rmc;
use rmc::Invariant;
#[derive(PartialEq)]
enum Error {
Error1,
Error2,
}
struct MyType {
pub val: i32,
pub is_negative: bool,
}
unsafe impl rmc::Invariant for MyType {
fn is_valid(&self) -> bool {
(self.is_negative && self.val < 0) || (!self.is_negative && self.val >= 0)
}
}
unsafe impl rmc::Invariant for Error {
fn is_valid(&self) -> bool |
}
fn main() {
let result: Result<MyType, Error> = rmc::any();
match result {
Ok(v) => assert!(v.is_valid()),
Err(e) => assert!(e.is_valid()),
}
}
| {
matches!(*self, Error::Error1 | Error::Error2)
} |
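A plain-Rust sketch of what the MyType invariant accepts and rejects, outside the rmc harness (the values are illustrative):

let ok = MyType { val: -3, is_negative: true };
let bad = MyType { val: 5, is_negative: true };
assert!(ok.is_valid());   // sign flag agrees with the stored value
assert!(!bad.is_valid()); // positive value flagged as negative fails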
__init__.py | from .crawl_cat_list import crawl_cat_list | from .crawl_by_cat_url import crawl_by_cat_url
from .crawl_by_search import crawl_by_search
from .crawl_by_shop_url import crawl_by_shop_url |
|
sharp-handyman.js | var data = {
"body": "<path d=\"M16.37 12.87h-.99l-2.54 2.54v.99l6.01 6.01l3.54-3.54l-6.02-6z\" fill=\"currentColor\"/><path d=\"M17.34 10.19l1.41-1.41l2.12 2.12a3 3 0 0 0 0-4.24l-3.54-3.54l-1.41 1.41V1.71l-.7-.71l-3.54 3.54l.71.71h2.83l-1.41 1.41l1.06 1.06l-2.89 2.89l-4.13-4.13V5.06L4.83 2.04L2 4.87L5.03 7.9h1.41l4.13 4.13l-.85.85H7.6l-6.01 6.01l3.54 3.54l6.01-6.01V14.3l5.15-5.15l1.05 1.04z\" fill=\"currentColor\"/>",
"width": 24,
"height": 24
};
exports.__esModule = true; | exports.default = data; |
|
helpers.py | """
Small helpers for code that is not shown in the notebooks
"""
from sklearn import neighbors, datasets, linear_model
import pylab as pl
import numpy as np
from matplotlib.colors import ListedColormap
# Create color maps for 3-class classification problem, as with iris
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
def plot_iris_knn():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
knn = neighbors.KNeighborsClassifier(n_neighbors=3)
knn.fit(X, y)
x_min, x_max = X[:, 0].min() - .1, X[:, 0].max() + .1
y_min, y_max = X[:, 1].min() - .1, X[:, 1].max() + .1
xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),
np.linspace(y_min, y_max, 100))
Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure()
pl.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) | pl.ylabel('sepal width (cm)')
pl.axis('tight')
def plot_polynomial_regression():
rng = np.random.RandomState(0)
x = 2*rng.rand(100) - 1
f = lambda t: 1.2 * t**2 + .1 * t**3 - .4 * t **5 - .5 * t ** 9
y = f(x) + .4 * rng.normal(size=100)
x_test = np.linspace(-1, 1, 100)
pl.figure()
pl.scatter(x, y, s=4)
X = np.array([x**i for i in range(5)]).T
X_test = np.array([x_test**i for i in range(5)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='4th order')
X = np.array([x**i for i in range(10)]).T
X_test = np.array([x_test**i for i in range(10)]).T
regr = linear_model.LinearRegression()
regr.fit(X, y)
pl.plot(x_test, regr.predict(X_test), label='9th order')
pl.legend(loc='best')
pl.axis('tight')
pl.title('Fitting a 4th and a 9th order polynomial')
pl.figure()
pl.scatter(x, y, s=4)
pl.plot(x_test, f(x_test), label="truth")
pl.axis('tight')
pl.title('Ground truth (9th order polynomial)') | pl.xlabel('sepal length (cm)') |
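The design matrices built above are Vandermonde-style (column i holds x**i); numpy has a built-in equivalent, shown here for reference:

import numpy as np
x = np.linspace(-1, 1, 100)
X = np.vander(x, N=5, increasing=True)  # columns x**0 .. x**4
# identical to np.array([x**i for i in range(5)]).T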
day20.rs | use std::collections::HashMap;
use std::fmt::Debug;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum Light {
On,
Off,
}
fn to_number(lights: &[Light]) -> usize {
let binary_string: String = lights
.iter()
.map(|l| match l {
Light::On => "1",
Light::Off => "0",
})
.collect();
usize::from_str_radix(&binary_string, 2).unwrap()
}
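// Example: to_number(&[Light::On, Light::Off, Light::Off, Light::On, Light::Off])
// reads the states as the binary string "10010" and returns 18.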
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
struct LightAndPosition {
state: Light,
coordinates: [i32; 2],
}
struct CoordinateDirection2d<'a> {
coordinates: [i32; 2],
maximum: &'a [i32; 2],
minimum: &'a [i32; 2],
}
impl Iterator for CoordinateDirection2d<'_> {
type Item = [i32; 2];
fn | (&mut self) -> Option<[i32; 2]> {
if self.coordinates[1] > self.maximum[1] {
return None;
}
let to_return = self.coordinates.clone();
if self.coordinates[0] == self.maximum[0] {
self.coordinates[0] = self.minimum[0];
self.coordinates[1] += 1;
} else {
self.coordinates[0] += 1;
}
Some(to_return)
}
}
impl From<&str> for Light {
fn from(s: &str) -> Self {
match s {
"#" => Light::On,
"." => Light::Off,
_ => unimplemented!(),
}
}
}
#[aoc_generator(day20)]
fn to_vec(input: &str) -> (Vec<Light>, [i32; 2], HashMap<[i32; 2], LightAndPosition>) {
let mut space: HashMap<[i32; 2], LightAndPosition> = HashMap::new();
let mut y = 0;
let mut max_x = 0;
let things: Vec<&str> = input.split("\n\n").filter(|s| !s.is_empty()).collect();
let enhancer: Vec<Light> = things[0]
.split("")
.filter(|s| !s.trim().is_empty())
.map(|s| s.into())
.collect();
for l in things[1].lines() {
for (x, s) in l.split("").filter(|&s| !s.is_empty()).enumerate() {
space.insert(
[x as i32, y],
LightAndPosition {
state: s.into(),
coordinates: [x as i32, y],
},
);
max_x = x.max(max_x);
}
if !l.trim().is_empty() {
y += 1;
}
}
(enhancer, [max_x as i32, y - 1], space)
}
#[aoc(day20, part1)]
fn day20_1(input: &(Vec<Light>, [i32; 2], HashMap<[i32; 2], LightAndPosition>)) -> usize {
let (enhancer, max_coor, space) = input;
let mut curr_vals = space.clone();
let mut x_max = max_coor[0];
let mut y_max = max_coor[1];
let mut default_light = Light::Off;
for _ in 0..2 {
let mut new_vals = HashMap::new();
x_max = x_max + 2;
y_max = y_max + 2;
for x in -2..=x_max {
for y in -2..=y_max {
let coors = [x, y];
let lights_coors = vec![
[coors[0] - 1, coors[1] - 1],
[coors[0], coors[1] - 1],
[coors[0] + 1, coors[1] - 1],
[coors[0] - 1, coors[1]],
[coors[0], coors[1]],
[coors[0] + 1, coors[1]],
[coors[0] - 1, coors[1] + 1],
[coors[0], coors[1] + 1],
[coors[0] + 1, coors[1] + 1],
];
let mut lights = vec![];
for coor_iter in lights_coors.into_iter() {
lights.push(
curr_vals
.get(&coor_iter)
.map(|l| l.state)
.unwrap_or(default_light),
);
}
let v = to_number(lights.as_slice());
let new_value = LightAndPosition {
coordinates: coors,
state: enhancer[v].clone(),
};
new_vals.insert(coors, new_value);
}
}
curr_vals = new_vals;
default_light = match default_light {
Light::On => enhancer[enhancer.len() - 1],
Light::Off => enhancer[0],
};
println!(
"{}",
curr_vals
.values()
.map(|l| match l.state {
Light::On => 1,
Light::Off => 0,
})
.sum::<usize>()
);
}
curr_vals
.values()
.map(|l| match l.state {
Light::On => 1,
Light::Off => 0,
})
.sum()
}
#[aoc(day20, part2)]
fn day20_2(input: &(Vec<Light>, [i32; 2], HashMap<[i32; 2], LightAndPosition>)) -> usize {
let (enhancer, max_coor, space) = input;
let mut curr_vals = space.clone();
let mut x_max = max_coor[0];
let mut y_max = max_coor[1];
let mut y_min = 0;
let mut x_min = 0;
let mut default_light = Light::Off;
for _ in 0..50 {
let mut new_vals = HashMap::new();
x_min = x_min - 2;
y_min = y_min - 2;
x_max = x_max + 2;
y_max = y_max + 2;
for x in x_min..=x_max {
for y in y_min..=y_max {
let coors = [x, y];
let lights_coors = vec![
[coors[0] - 1, coors[1] - 1],
[coors[0], coors[1] - 1],
[coors[0] + 1, coors[1] - 1],
[coors[0] - 1, coors[1]],
[coors[0], coors[1]],
[coors[0] + 1, coors[1]],
[coors[0] - 1, coors[1] + 1],
[coors[0], coors[1] + 1],
[coors[0] + 1, coors[1] + 1],
];
let mut lights = vec![];
for coor_iter in lights_coors.into_iter() {
lights.push(
curr_vals
.get(&coor_iter)
.map(|l| l.state)
.unwrap_or(default_light),
);
}
let v = to_number(lights.as_slice());
let new_value = LightAndPosition {
coordinates: coors,
state: enhancer[v].clone(),
};
new_vals.insert(coors, new_value);
}
}
curr_vals = new_vals;
default_light = match default_light {
Light::On => enhancer[enhancer.len() - 1],
Light::Off => enhancer[0],
};
}
curr_vals
.values()
.map(|l| match l.state {
Light::On => 1,
Light::Off => 0,
})
.sum()
}
#[cfg(test)]
mod tests {
use super::*;
const TEST_INPUT: &str =
"..#.#..#####.#.#.#.###.##.....###.##.#..###.####..#####..#....#..#..##..##
#..######.###...####..#..#####..##..#.#####...##.#.#..#.##..#.#......#.###
.######.###.####...#.##.##..#..#..#####.....#.#....###..#.##......#.....#.
.#..#..##..#...##.######.####.####.#.#...#.......#..#.#.#...####.##.#.....
.#..#...##.#.##..#...##.#.##..###.#......#.#.......#.#.#.####.###.##...#..
...####.#..#..#.##.#....##..#.####....##...##..#...#......#.#.......#.....
..##..####..#...#.#.#...##..#.#..###..#####........#..####......#..#
#..#.
#....
##..#
..#..
..###";
#[test]
fn test_input() {
assert_eq!(day20_1(&to_vec(TEST_INPUT)), 35);
assert_eq!(day20_2(&to_vec(TEST_INPUT)), 3351)
}
}
| next |
MsalWebAppAuthClient.ts | /*
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License.
*/
import express, {
RequestHandler,
Request,
Response,
NextFunction,
Router
} from "express";
import {
OIDC_DEFAULT_SCOPES,
InteractionRequiredAuthError,
StringUtils,
} from "@azure/msal-common";
import {
Configuration,
SilentFlowRequest,
AuthenticationResult
} from "@azure/msal-node";
import { BaseAuthClient } from "../BaseAuthClient";
import { ConfigHelper } from "../../config/ConfigHelper";
import { IdTokenClaims } from "../../crypto/AuthToken";
import { FetchManager } from "../../network/FetchManager";
import { UrlUtils } from "../../utils/UrlUtils";
import { CryptoUtils } from "../../utils/CryptoUtils";
import {
Resource,
AppSettings,
AccessRule,
} from "../../config/AppSettings";
import { AuthCodeParams } from "../../utils/Types";
import {
InitializationOptions,
TokenRequestOptions,
GuardOptions, | SignInOptions,
SignOutOptions,
HandleRedirectOptions
} from "../MiddlewareOptions";
import {
AppStages,
ErrorMessages,
AccessControlConstants,
InfoMessages
} from "../../utils/Constants";
/**
* A simple wrapper around MSAL Node ConfidentialClientApplication object.
* It offers a collection of middleware and utility methods that automate
* basic authentication and authorization tasks in Express MVC web apps
*/
export class MsalWebAppAuthClient extends BaseAuthClient {
private cryptoUtils: CryptoUtils;
/**
* @param {AppSettings} appSettings
* @param {Configuration} msalConfig
* @constructor
*/
constructor(appSettings: AppSettings, msalConfig: Configuration) {
super(appSettings, msalConfig);
this.cryptoUtils = new CryptoUtils();
}
/**
* Initialize AuthProvider and set default routes and handlers
* @param {InitializationOptions} options
* @returns {Router}
*/
initialize(options?: InitializationOptions): Router {
const appRouter = express.Router();
// handle redirect
appRouter.get(UrlUtils.getPathFromUrl(this.appSettings.authRoutes.redirect), this.handleRedirect());
appRouter.post(UrlUtils.getPathFromUrl(this.appSettings.authRoutes.redirect), this.handleRedirect());
appRouter.use((req: Request, res: Response, next: NextFunction) => {
if (!req.session) {
// TODO: handle gracefully
throw new Error(ErrorMessages.SESSION_NOT_FOUND);
}
            // add session nonce for csrf
req.session.nonce = this.cryptoProvider.createNewGuid();
next();
});
if (this.appSettings.authRoutes.frontChannelLogout) {
/**
* Expose front-channel logout route. For more information, visit:
* https://docs.microsoft.com/azure/active-directory/develop/v2-protocols-oidc#single-sign-out
*/
appRouter.get(this.appSettings.authRoutes.frontChannelLogout, (req: Request, res: Response, next: NextFunction) => {
req.session.destroy(() => {
res.sendStatus(200);
});
});
}
return appRouter;
}
/**
* Initiates sign in flow
* @param {SignInOptions} options: options to modify login request
* @returns {RequestHandler}
*/
signIn(options?: SignInOptions): RequestHandler {
return (req: Request, res: Response, next: NextFunction): Promise<void> => {
const key = this.cryptoUtils.createKey(req.session.nonce, this.cryptoUtils.generateSalt());
req.session.cryptoKey = key.toString("hex");
const state = this.cryptoProvider.base64Encode(
this.cryptoUtils.encryptData(JSON.stringify({
stage: AppStages.SIGN_IN,
path: options.postLoginRedirect,
nonce: req.session.nonce,
}), key)
);
const params: AuthCodeParams = {
authority: this.msalConfig.auth.authority,
scopes: OIDC_DEFAULT_SCOPES,
state: state,
redirect: UrlUtils.ensureAbsoluteUrl(req, this.appSettings.authRoutes.redirect)
};
// get url to sign user in
return this.getAuthCode(req, res, next, params);
}
};
/**
* Initiate sign out and destroy the session
* @param options: options to modify logout request
* @returns {RequestHandler}
*/
signOut(options?: SignOutOptions): RequestHandler {
return (req: Request, res: Response, next: NextFunction): void => {
const postLogoutRedirectUri = UrlUtils.ensureAbsoluteUrl(req, options.postLogoutRedirect);
/**
* Construct a logout URI and redirect the user to end the
* session with Azure AD/B2C. For more information, visit:
* (AAD) https://docs.microsoft.com/azure/active-directory/develop/v2-protocols-oidc#send-a-sign-out-request
* (B2C) https://docs.microsoft.com/azure/active-directory-b2c/openid-connect#send-a-sign-out-request
*/
const logoutUri = `${this.msalConfig.auth.authority}/oauth2/v2.0/logout?post_logout_redirect_uri=${postLogoutRedirectUri}`;
req.session.destroy(() => {
res.redirect(logoutUri);
});
}
};
/**
* Middleware that handles redirect depending on request state
* There are basically 2 stages: sign-in and acquire token
* @param {HandleRedirectOptions} options: options to modify this middleware
* @returns {RequestHandler}
*/
private handleRedirect(options?: HandleRedirectOptions): RequestHandler {
return async (req: Request, res: Response, next: NextFunction): Promise<void> => {
// TODO: handle form_post method
if (req.query.state) {
const state = JSON.parse(this.cryptoUtils.decryptData(this.cryptoProvider.base64Decode(req.query.state as string), Buffer.from(req.session.cryptoKey, "hex")));
// check if nonce matches
if (state.nonce === req.session.nonce) {
switch (state.stage) {
case AppStages.SIGN_IN: {
// token request should have auth code
req.session.tokenRequest.code = req.query.code as string;
try {
// exchange auth code for tokens
const tokenResponse: AuthenticationResult = await this.msalClient.acquireTokenByCode(req.session.tokenRequest);
try {
const isIdTokenValid = await this.tokenValidator.validateIdToken(tokenResponse.idToken);
if (isIdTokenValid) {
// assign session variables
req.session.isAuthenticated = true;
req.session.account = tokenResponse.account;
res.redirect(state.path);
} else {
this.logger.error(ErrorMessages.INVALID_TOKEN);
res.redirect(this.appSettings.authRoutes.unauthorized);
}
} catch (error) {
this.logger.error(ErrorMessages.CANNOT_VALIDATE_TOKEN);
next(error)
}
} catch (error) {
this.logger.error(ErrorMessages.TOKEN_ACQUISITION_FAILED);
next(error)
}
break;
}
case AppStages.ACQUIRE_TOKEN: {
// get the name of the resource associated with scope
const resourceName = ConfigHelper.getResourceNameFromScopes(req.session.tokenRequest.scopes, this.appSettings);
req.session.tokenRequest.code = req.query.code as string;
try {
const tokenResponse: AuthenticationResult = await this.msalClient.acquireTokenByCode(req.session.tokenRequest);
req.session.protectedResources[resourceName].accessToken = tokenResponse.accessToken;
res.redirect(state.path);
} catch (error) {
this.logger.error(ErrorMessages.TOKEN_ACQUISITION_FAILED);
next(error);
}
break;
}
default:
this.logger.error(ErrorMessages.CANNOT_DETERMINE_APP_STAGE);
res.redirect(this.appSettings.authRoutes.error);
break;
}
} else {
this.logger.error(ErrorMessages.NONCE_MISMATCH);
res.redirect(this.appSettings.authRoutes.unauthorized);
}
} else {
this.logger.error(ErrorMessages.STATE_NOT_FOUND);
res.redirect(this.appSettings.authRoutes.unauthorized);
}
}
};
/**
* Middleware that gets tokens via acquireToken*
* @param {TokenRequestOptions} options: options to modify this middleware
* @returns {RequestHandler}
*/
getToken(options: TokenRequestOptions): RequestHandler {
return async (req: Request, res: Response, next: NextFunction): Promise<void> => {
// get scopes for token request
const scopes = options.resource.scopes;
const resourceName = ConfigHelper.getResourceNameFromScopes(scopes, this.appSettings)
if (!req.session.protectedResources) {
req.session.protectedResources = {}
}
req.session.protectedResources = {
[resourceName]: {
...this.appSettings.protectedResources[resourceName],
accessToken: null,
} as Resource
};
try {
const silentRequest: SilentFlowRequest = {
account: req.session.account,
scopes: scopes,
};
// acquire token silently to be used in resource call
const tokenResponse: AuthenticationResult = await this.msalClient.acquireTokenSilent(silentRequest);
// In B2C scenarios, sometimes an access token is returned empty.
// In that case, we will acquire token interactively instead.
if (StringUtils.isEmpty(tokenResponse.accessToken)) {
this.logger.error(ErrorMessages.TOKEN_NOT_FOUND);
throw new InteractionRequiredAuthError(ErrorMessages.INTERACTION_REQUIRED);
}
req.session.protectedResources[resourceName].accessToken = tokenResponse.accessToken;
next();
} catch (error) {
// in case there are no cached tokens, initiate an interactive call
if (error instanceof InteractionRequiredAuthError) {
const key = this.cryptoUtils.createKey(req.session.nonce, this.cryptoUtils.generateSalt());
req.session.cryptoKey = key.toString("hex");
const state = this.cryptoProvider.base64Encode(
this.cryptoUtils.encryptData(JSON.stringify({
stage: AppStages.ACQUIRE_TOKEN,
path: req.originalUrl,
nonce: req.session.nonce,
}), key)
);
const params: AuthCodeParams = {
authority: this.msalConfig.auth.authority,
scopes: scopes,
state: state,
redirect: UrlUtils.ensureAbsoluteUrl(req, this.appSettings.authRoutes.redirect),
account: req.session.account,
};
// initiate the first leg of auth code grant to get token
return this.getAuthCode(req, res, next, params);
} else {
next(error);
}
}
}
};
/**
* Check if authenticated in session
* @param {GuardOptions} options: options to modify this middleware
* @returns {RequestHandler}
*/
isAuthenticated(options?: GuardOptions): RequestHandler {
return (req: Request, res: Response, next: NextFunction): void => {
if (req.session) {
if (!req.session.isAuthenticated) {
this.logger.error(ErrorMessages.NOT_PERMITTED);
return res.redirect(this.appSettings.authRoutes.unauthorized);
}
next();
} else {
this.logger.error(ErrorMessages.SESSION_NOT_FOUND);
res.redirect(this.appSettings.authRoutes.unauthorized);
}
}
};
/**
* Checks if the user has access for this route, defined in access matrix
* @param {GuardOptions} options: options to modify this middleware
* @returns {RequestHandler}
*/
hasAccess(options?: GuardOptions): RequestHandler {
return async (req: Request, res: Response, next: NextFunction): Promise<void> => {
if (req.session && this.appSettings.accessMatrix) {
const checkFor = options.accessRule.hasOwnProperty(AccessControlConstants.GROUPS) ? AccessControlConstants.GROUPS : AccessControlConstants.ROLES;
switch (checkFor) {
case AccessControlConstants.GROUPS:
if (req.session.account.idTokenClaims[AccessControlConstants.GROUPS] === undefined) {
if (req.session.account.idTokenClaims[AccessControlConstants.CLAIM_NAMES]
|| req.session.account.idTokenClaims[AccessControlConstants.CLAIM_SOURCES]) {
this.logger.warning(InfoMessages.OVERAGE_OCCURRED);
return await this.handleOverage(req, res, next, options.accessRule);
} else {
this.logger.error(ErrorMessages.USER_HAS_NO_GROUP);
return res.redirect(this.appSettings.authRoutes.unauthorized);
}
} else {
const groups = req.session.account.idTokenClaims[AccessControlConstants.GROUPS];
if (!this.checkAccessRule(req.method, options.accessRule, groups, AccessControlConstants.GROUPS)) {
return res.redirect(this.appSettings.authRoutes.unauthorized);
}
}
next();
break;
case AccessControlConstants.ROLES:
if (req.session.account.idTokenClaims[AccessControlConstants.ROLES] === undefined) {
this.logger.error(ErrorMessages.USER_HAS_NO_ROLE);
return res.redirect(this.appSettings.authRoutes.unauthorized);
} else {
const roles = req.session.account.idTokenClaims[AccessControlConstants.ROLES];
if (!this.checkAccessRule(req.method, options.accessRule, roles, AccessControlConstants.ROLES)) {
return res.redirect(this.appSettings.authRoutes.unauthorized);
}
}
next();
break;
default:
break;
}
} else {
res.redirect(this.appSettings.authRoutes.unauthorized);
}
}
}
// ============== UTILS ===============
/**
* This method is used to generate an auth code url request
* @param {Request} req: express request object
* @param {Response} res: express response object
* @param {NextFunction} next: express next function
* @param {AuthCodeParams} params: modifies auth code url request
* @returns {Promise}
*/
private async getAuthCode(req: Request, res: Response, next: NextFunction, params: AuthCodeParams): Promise<void> {
// prepare the request
req.session.authCodeRequest = {
authority: params.authority,
scopes: params.scopes,
state: params.state,
redirectUri: params.redirect,
prompt: params.prompt,
account: params.account,
}
req.session.tokenRequest = {
authority: params.authority,
scopes: params.scopes,
redirectUri: params.redirect,
code: undefined,
}
// request an authorization code to exchange for tokens
try {
const response = await this.msalClient.getAuthCodeUrl(req.session.authCodeRequest);
res.redirect(response);
} catch (error) {
this.logger.error(ErrorMessages.AUTH_CODE_NOT_OBTAINED);
next(error);
}
};
/**
* Handles group overage claims by querying MS Graph /memberOf endpoint
* @param {Request} req: express request object
* @param {Response} res: express response object
* @param {NextFunction} next: express next function
* @param {AccessRule} rule: a given access rule
* @returns {Promise}
*/
private async handleOverage(req: Request, res: Response, next: NextFunction, rule: AccessRule): Promise<void> {
const { _claim_names, _claim_sources, ...newIdTokenClaims } = <IdTokenClaims>req.session.account.idTokenClaims;
const silentRequest: SilentFlowRequest = {
account: req.session.account,
scopes: AccessControlConstants.GRAPH_MEMBER_SCOPES.split(" "),
};
try {
// acquire token silently to be used in resource call
const tokenResponse = await this.msalClient.acquireTokenSilent(silentRequest);
try {
const graphResponse = await FetchManager.callApiEndpointWithToken(AccessControlConstants.GRAPH_MEMBERS_ENDPOINT, tokenResponse.accessToken);
/**
* Some queries against Microsoft Graph return multiple pages of data either due to server-side paging
* or due to the use of the $top query parameter to specifically limit the page size in a request.
* When a result set spans multiple pages, Microsoft Graph returns an @odata.nextLink property in
* the response that contains a URL to the next page of results. Learn more at https://docs.microsoft.com/graph/paging
*/
if (graphResponse[AccessControlConstants.PAGINATION_LINK]) {
try {
const userGroups = await FetchManager.handlePagination(tokenResponse.accessToken, graphResponse[AccessControlConstants.PAGINATION_LINK]);
req.session.account.idTokenClaims = {
...newIdTokenClaims,
groups: userGroups
}
if (!this.checkAccessRule(req.method, rule, req.session.account.idTokenClaims[AccessControlConstants.GROUPS], AccessControlConstants.GROUPS)) {
return res.redirect(this.appSettings.authRoutes.unauthorized);
} else {
return next();
}
} catch (error) {
next(error);
}
} else {
req.session.account.idTokenClaims = {
...newIdTokenClaims,
groups: graphResponse["value"].map((v) => v.id)
}
if (!this.checkAccessRule(req.method, rule, req.session.account.idTokenClaims[AccessControlConstants.GROUPS], AccessControlConstants.GROUPS)) {
return res.redirect(this.appSettings.authRoutes.unauthorized);
} else {
return next();
}
}
} catch (error) {
next(error);
}
} catch (error) {
// TODO: handle silent token acquisition error
next(error);
}
};
/**
* Checks if the request passes a given access rule
* @param {string} method: HTTP method for this route
* @param {AccessRule} rule: access rule for this route
* @param {Array} creds: user's credentials i.e. roles or groups
* @param {string} credType: roles or groups
* @returns {boolean}
*/
private checkAccessRule(method: string, rule: AccessRule, creds: string[], credType: string): boolean {
if (rule.methods.includes(method)) {
switch (credType) {
case AccessControlConstants.GROUPS:
if (rule.groups.filter(elem => creds.includes(elem)).length < 1) {
this.logger.error(ErrorMessages.USER_NOT_IN_GROUP);
return false;
}
break;
case AccessControlConstants.ROLES:
if (rule.roles.filter(elem => creds.includes(elem)).length < 1) {
this.logger.error(ErrorMessages.USER_NOT_IN_ROLE);
return false;
}
break;
default:
break;
}
} else {
this.logger.error(ErrorMessages.METHOD_NOT_ALLOWED);
return false;
}
return true;
}
} | |
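A minimal wiring sketch for the class above in an Express app; the route paths, the authClient construction and the session middleware are assumed to be set up elsewhere:

import express from "express";
// assume authClient: MsalWebAppAuthClient built from appSettings + msalConfig,
// and an express-session middleware registered before these routes
const app = express();
app.use(authClient.initialize());
app.get("/signin", authClient.signIn({ postLoginRedirect: "/" }));
app.get("/signout", authClient.signOut({ postLogoutRedirect: "/" }));
app.get("/profile", authClient.isAuthenticated(), (req, res) => res.send("ok"));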
websql.js | ;
(function (root, factory) {
'use strict'
if (typeof define === 'function' && define.amd) {
define('websql', factory)
} else if (typeof exports === 'object') {
exports = module.exports = factory()
} else {
root.websql = factory()
}
})(this, function () {
'use strict'
//htmlUtils
var _ = {
createClass: function () {
function defineProperties(target, props) {
for (var key in props) {
if (target.hasOwnProperty(key)) {
console.log(_.type(target) + " hasOwnProperty " + key)
} else {
                        // do not overwrite existing properties
var descriptor = {
key: key,
value: props[key],
enumerable: false,
configurable: true,
writable: true
}
Object.defineProperty(target, key, descriptor);
}
}
}
return function (Constructor, protoProps, staticProps) {
if (protoProps) defineProperties(Constructor.prototype, protoProps);
if (staticProps) defineProperties(Constructor, staticProps);
return Constructor;
};
}(),
        // type detection
type: function (o) {
if (o === null) return 'null';
var s = Object.prototype.toString.call(o);
var t = s.match(/\[object (.*?)\]/)[1].toLowerCase();
return t === 'number' ? isNaN(o) ? 'nan' : !isFinite(o) ? 'infinity' : t : t;
},
        // copy own (data) properties; prototype methods are not included
extend: function (obj) {
var len = arguments.length;
if (len > 1) obj = obj || {};
for (var i = 1; i < len; i++) {
var source = arguments[i];
if (source) {
for (var prop in source) {
if (source.hasOwnProperty(prop)) {
if (_.type(source[prop]) === "array") {
obj[prop] = _.extend([], source[prop]);
} else if (_.type(source[prop]) === "object") {
obj[prop] = _.extend({}, source[prop]);
} else {
obj[prop] = source[prop];
}
}
}
}
}
return obj;
},
addEvent: function (type, el, listener) {
if (arguments.length === 2 && _.type(arguments[1]) === "function") {
el = window;
listener = arguments[1]
}
if (window.addEventListener) {
el.addEventListener(type, listener, false);
} else {
el.attachEvent('on' + type, listener);
}
//store events
if (!el.events) el.events = [];
el.events.push({
type: type,
el: el,
listener: listener
}); //listener
},
closest: function (el, cls) {
if (!el.parentNode) { //document
return null
} else if (cls.indexOf(".") === 0 && el.className.indexOf(cls.substring(1)) >= 0) {
return el;
} else if (cls.indexOf("#") === 0 && el.id.toLowerCase() === cls.substring(1).toLowerCase()) {
return el;
} else if (el.tagName.toLowerCase() === cls.toLowerCase()) {
return el
} else {
return _.closest(el.parentNode, cls)
}
},
sortBy: function (key, asc) {
return function (a, b) {
if (key === "count") {
return asc ? a[key] - b[key] : b[key] - a[key]
} else {
return asc ? a[key].localeCompare(b[key], 'zh-CN') : b[key].localeCompare(a[key], 'zh-CN')
}
}
},
obj2arr: function (obj) {
var keys = [],
vals = [],
typs = [];
for (var key in obj) {
vals.push(obj[key])
keys.push(key)
typs.push(_.type(obj[key]))
}
return {
keys: keys,
vals: vals,
typs: typs
}
},
autoId: function (ele) {
if (!ele.id) {
var id = "_" + Math.random().toString(16).slice(2)
ele.setAttribute("id", id);
}
return ele;
},
queryAll: function (selectors, rootEle) {
if (rootEle) {
rootEle = _.autoId(rootEle)
return document.querySelectorAll("#" + rootEle.id + " " + selectors)
}
return document.querySelectorAll(selectors)
},
query: function (selectors, rootEle) {
if (rootEle) {
rootEle = _.autoId(rootEle)
return document.querySelector("#" + rootEle.id + " " + selectors)
}
return document.querySelector(selectors)
},
fast: function () {
var len = arguments.length,
                args = new Array(len), //faster than Array.prototype.slice.call(arguments)
times = 10000;
while (len--) args[len] = arguments[len];
var last = args[args.length - 1];
if (_.type(last) === "number") {
times = last;
args.pop();
}
var _run = function (fn, times) {
var word = 'run ' + fn.name + '{} ' + times + ' time' + (times > 1 ? 's' : '');
console.time(word);
while (times--) fn.apply(this, args);
console.timeEnd(word);
}
args.forEach(function (t) {
t && _run.call(this, t, times);
});
},
start: function (tag, options) {
var sb = [];
sb.push('<');
sb.push(tag);
for (var key in options) {
sb.push(' ' + key + '="' + options[key] + '"');
}
sb.push('>');
return sb.join('');
},
end: function (tag) {
return '</' + tag + '>';
},
        // string-concatenation approach
wrap: function (tag, text, options) {
var i = tag.indexOf(" ");
if (i > 0) {
var leftTag = tag.substring(0, i)
return _.wrap(leftTag, _.wrap(tag.substring(i + 1), text, options))
}
return text === null ? _.start(tag, options) : _.start(tag, options) + text + _.end(tag);
},
        // DOM-creation approach
createEle: function (tag, text, props, events) {
var i = tag.indexOf(" ");
if (i > 0) {
var leftTag = tag.substring(0, i)
return _.createEle(leftTag, _.createEle(tag.substring(i + 1), text, props, events))
}
var ele = document.createElement(tag);
var append = function (text) {
switch (_.type(text)) {
case "string":
case "number":
case "date":
// ele.innerHTML += text;
ele.appendChild(document.createTextNode(text));
break;
case "array":
text.forEach(function (t) {
append(t)
})
break;
case "null":
case "nan":
case "undefined":
break;
default:
ele.appendChild(text)
}
}
append(text)
// if (tag.toLowerCase() === "input") {
// }
for (var key in props) {
if (tag.toLowerCase() === "input" && key === "checked") {
if (props[key]) {
ele.setAttribute(key, props[key])
}
} else {
ele.setAttribute(key, props[key])
}
}
for (var key in events) {
_.addEvent(key, ele, events[key]);
}
return ele;
},
checkbox: function (options) {
var checkbox = _.createEle("input", "", _.extend({
type: "checkbox",
class: "checkbox"
}, options), {
click: function (e) {
var el = e.target,
table = _.closest(el, ".dataintable");
var numb = _.queryAll("input[type='checkbox']:checked", table).length
var optPanel = _.query(".optPanel", table)
if (optPanel) {
if (numb === 0) {
optPanel.style.overflow = "hidden"
// optPanel.style.bottom = '-55px';
} else {
// optPanel.style.bottom = '-0px';
optPanel.style.overflow = "visible"
// var HistoryCountSpan=_.query(".HistoryCountSpan")
// HistoryCountSpan.innerText = numb
}
}
}
})
if (options) {
var label = _.div(options.label, {
class: "label"
})
return _.div([checkbox, label], {
class: "input-group"
});
} else {
return checkbox
}
},
btn: function (text, props, events) {
if (props && props.class) {
props.class = "btn " + props.class
}
return _.div(text, _.extend({
class: "btn"
}, props), events)
},
btnGroup: function (text, props, events) {
if (props && props.class) {
props.class = "btn-group " + props.class
}
return _.div(text, _.extend({
class: "btn-group"
}, props), events)
},
stringify: function (el) {
var str = el.tagName.toLowerCase();
str += el.id ? "#" + el.id : "";
str += el.className ? "." + el.className.replace(/\s+/g, ".") : "";
return str;
},
inStyle: function (obj) {
return ["width", "height", "top", "left"].map(function (t) {
return obj[t] ? t + ":" + obj[t] : null
}).join(";")
// return JSON.stringify(obj).replace(/\"/g,"").replace(/,/g,";").replace(/{/,"").replace(/}/,"")
},
img: function (options) {
if (_.type(options) === "object") {
return _.createEle("img", "", _.extend({
src: options.url
}, options))
}
return _.createEle("img", "", {
src: options
})
},
        // walk the DOM and adjust oversized children
traversalWidth: function (el) {
var children = el.children,
len = children.length;
for (var i = 0; i < len; i++) {
var t = children[i]
if (parseInt(getComputedStyle(t)["width"]) > 1000) {
// t.setAttribute("witdh","100%")
t.style.width = "100%"
_.traversalWidth(t)
}
}
},
        // allows adding several classes at once
        // de-duplicates class names
addClass: function (el, cls) {
var arr1 = el.className.split(" ")
var arr2 = cls.split(" ")
var obj = {}
arr1.forEach(function (t) {
obj[t] = 1
})
arr2.forEach(function (t) {
obj[t] = 1
})
var keys = []
for (var key in obj) {
keys.push(key)
}
el.className = keys.join(" ")
return el;
},
removeClass: function (el, cls) {
var arr1 = el.className.split(" ")
var arr2 = cls.split(" ")
var obj = {}
arr1.forEach(function (t) {
if (arr2.indexOf(t) === -1) {
obj[t] = 1
}
})
var keys = []
for (var key in obj) {
keys.push(key)
}
el.className = keys.join(" ")
return el;
},
show: function (el) {
_.removeClass(el, "hide")
_.addClass(el, "show")
},
hide: function (el) {
_.removeClass(el, "show")
_.addClass(el, "hide")
},
click: function (el, callback) {
_.addEvent("click", el, callback)
},
hasClass: function (el, cls) {
var arr = el.className.split(" ")
return arr.indexOf(cls) >= 0
},
getStyle: function (el, attr) {
if (el.currentStyle) {
return el.currentStyle[attr];
} else {
return getComputedStyle(el, false)[attr];
}
}
};
["div", "ul", "li", "tbody", "tfoot", "thead", "td", "tr", "th", "table", "textarea", "i", "span", "colgroup", "col", "a"].forEach(function (t) {
_[t] = function (text, props, events) {
return _.createEle(t, text, props, events)
}
});
// var _log = console.log
// console.log = function () {
// if (_.type(arguments[0]) === "HTMLDivElement".toLowerCase()) {
// _log.call(console, _.stringify(arguments[0]), 'color:blue')
// } else {
// _log.call(console, [].slice.call(arguments).join(" "))
// }
// }
    // date/time helpers
var _time = _.time = function () {
function Time(dateValue) {
if (!(this instanceof Time)) return new Time(dateValue);
var | this.date = this.constructor.toDate(dateValue);
this.year = t.getFullYear();
this.month = t.getMonth() + 1;
            this.day = t.getDate(); // day of month
this.hour = t.getHours();
this.minute = t.getMinutes();
this.second = t.getSeconds();
            this.msecond = t.getMilliseconds(); // milliseconds
            this.day_of_week = t.getDay() === 0 ? 7 : t.getDay(); // day of week
            // weeks here are treated as starting on Monday and ending on Sunday
            this.time = t.getTime();
            this.quarter = (t.getMonth() + 3) / 3 << 0; // quarter
}
return _.createClass(Time, {
            // Format the date as a string using the given pattern.
            // Month (M), day (d), hour (h), minute (m), second (s) and quarter (q) take 1-2 placeholders;
            // year (y) takes 1-4 placeholders; millisecond (S) takes a single placeholder (1-3 digits).
            // Examples:
// (new Date()).format("yyyy-MM-dd hh:mm:ss.S") ==> 2006-07-02 08:09:04.423
// (new Date()).format("yyyy-M-d h:m:s.S") ==> 2006-7-2 8:9:4.18
format: function (fmt) {
var self = this;
fmt = fmt || "yyyy-MM-dd hh:mm:ss.S";
var date = this.date
var o = {
"y+|Y+": this.year, //年份4位特殊处理
"M+": this.month,
"d+|D+": this.day,
"h+|H+": this.hour,
"m+": this.minute,
"s+": this.second,
"q+": this.quarter,
"S": this.msecond,
};
Object.keys(o).forEach(function (k, i) {
var v = '' + o[k];
fmt = fmt.replace(new RegExp(k, 'g'), function (t) {
return i === 0 ? v.substr(4 - t.length) : t[1] ? self.constructor.zerofill(v) : v;
})
});
return fmt;
},
            // set the time of day while keeping the current date
set: function (str) {
if (this.constructor.isTimeString(str)) str = this.format("yyyy-MM-dd") + " " + str;
return this.constructor.toDate(str);
},
            // set to the current time of day
setCurTime: function () {
return this.set(this.constructor().format("HH:mm:ss"));
},
add: function (interval, number, date) {
return this.constructor.add(interval, number, date)
},
utc: function () {
return Date.UTC(this.year, this.month - 1, this.day, this.hour, this.minute, this.second, this.msecond)
},
            // timezone offset in hours
zone: function () {
return (this.time - this.utc()) / 3600000;
},
diff: function (interval, date1) {
return this.constructor.diff(interval, this.date, date1)
},
//
// weekOfMonth: function() {
// },
            // week of the year (WEEK_OF_YEAR / WEEKNUM)
            // weeks run Monday through Sunday, counted as full weeks, so the first week may be short
            // here a week is simply counted as 7 days
            week: function (dateStr) {
                var day_of_year = 0;
                var d = dateStr ? this.constructor(dateStr) : this;
                if (!d) return "";
                var years = d.year,
                    month = d.month - 1,
                    day = d.day,
                    days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
                // leap year check
                if ((years % 4 === 0 && years % 100 !== 0) || years % 400 === 0) days[1] = 29;
                // sum the full months before this one, then add the day of month
                days.forEach(function (t, i) {
                    if (i < month) day_of_year += t;
                });
                day_of_year += day;
                return Math.ceil(day_of_year / 7);
            }
}, {
//_.time.toDate("09:30:00")
//_.time.toDate(timeRange[0].begin.format("yyyy-MM-dd") + " " + (new Date()).format(" HH:mm:ss"))
            // a time string hh:mm:ss defaults to today's date
            // a date string yyyy-MM-dd defaults to time 00:00:00 (not the current time)
toDate: function (str) {
if (_.type(str) === "date") {
return new Date(+str); //new
} else if (_.type(str) == null) {
return new Date();
} else if (/^\d*$/.test(str)) {
return new Date(+str);
} else if (_.type(str) == "string") {
if (this.isTimeString(str)) str = this().format("yyyy-MM-dd") + " " + str;
return new Date(Date.parse(str.replace(/-/g, "/")));
}
return str;
},
            // matches a time string hh:mm:ss
isTimeString: function (str) {
return /^(\d{1,2}):(\d{1,2})(?::(\d{1,2}))?$/.test(str);
},
set: function (date, time) {
return this.toDate(time ? date + ' ' + time : date);
// var self = this;
// if (this.isTimeString(time)) {
            //     // zero-pad the time
// time = time.replace(/\d{1,2}/g, function(t) {
// return self.zerofill(t)
// });
// // str.replace(reg, function(t, h, m, s) {
// // console.log(t + ":----")
// // console.log("h:" + h)
// // console.log("m:" + m)
// // console.log("s:" + s)
// // return self.zerofill(t)
// // })
// //(new Date()).format("yyyy-MM-dd")
// date += ' ' + time;
// }
// return new Date(Date.parse(date.replace(/-/g, "/")));
},
            // zero-pad to two digits
zerofill: function (n) {
n = '' + n;
return n[1] ? n : '0' + n;
},
            // format a duration in seconds as [hh:]mm:ss
durationFormat: function (duration) {
var self = this;
if (typeof duration !== 'number' || duration < 0) return "00:00";
var hour = duration / 3600 << 0;
duration %= 3600;
var minute = duration / 60 << 0;
duration %= 60;
var second = duration << 0;
var arr = [minute, second];
if (hour > 0) arr.unshift(hour);
return arr.map(function (n) {
return self.zerofill(n)
}).join(':');
},
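            // e.g. durationFormat(3725) -> "01:02:05"; durationFormat(59) -> "00:59"
            // (the hour field is prepended only when non-zero)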
            // map short interval names to full names
shortNameMap: function (s) {
s = ('' + s).toLowerCase();
var m = {
"y": "year",
"m": "month",
"d": "day",
"w": "week",
"h": "hour",
"n": "minute",
"min": "minute",
"s": "second",
"l": "msecond",
"ms": "msecond",
};
return s in m ? m[s] : s;
},
            // interval difference between two dates
diff: function (interval, date1, date2) {
var t1 = this(date1),
t2 = this(date2),
_diff = t1.time - t2.time,
seconds = 1000,
minutes = seconds * 60,
hours = minutes * 60,
days = hours * 24,
                    years = days * 365,
                    result;
switch (this.shortNameMap(interval)) {
case "year":
result = t1.year - t2.year; //_diff/years
break;
case "month":
result = (t1.year - t2.year) * 12 + (t1.month - t2.month);
break;
case "day":
result = Math.round(_diff / days);
break;
case "hour":
result = Math.round(_diff / hours);
break;
case "minute":
result = Math.round(_diff / minutes);
break;
case "second":
result = Math.round(_diff / seconds);
break;
case "msecond":
result = _diff;
break;
case "week":
result = Math.round(_diff / days) % 7;
break;
default:
result = "invalid";
}
return result;
},
add: function (interval, number, date) {
var date = this.toDate(date);
switch (this.shortNameMap(interval)) {
case "year":
return new Date(date.setFullYear(date.getFullYear() + number));
case "month":
return new Date(date.setMonth(date.getMonth() + number));
case "day":
return new Date(date.setDate(date.getDate() + number));
case "week":
return new Date(date.setDate(date.getDate() + 7 * number));
case "hour":
return new Date(date.setHours(date.getHours() + number));
case "minute":
return new Date(date.setMinutes(date.getMinutes() + number));
case "second":
return new Date(date.setSeconds(date.getSeconds() + number));
case "msecond":
return new Date(date.setMilliseconds(date.getMilliseconds() + number));
}
return date;
}
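            // e.g. _.time.add("d", 7, "2020-01-01") yields a Date for 2020-01-08 (parsed in local time)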
})
}();
    // navigation menu widget
var _nav = function () {
var Nav = function (options) {
if (!(this instanceof Nav)) return new Nav(options);
var options = this.options = _.extend({
menu: [{
label: "配置管理",
url: "github.com",
children: [{
label: "配置管理1",
url: "g.cn"
}]
},
{
label: "配置管理2",
children: [{
label: "配置管理21"
}]
}
],
info: {
text: ""
}
}, options)
var el = _.query(options.el);
var menu = options.menu;
var logo = options.logo;
var info = options.info;
var genLink = function (t) {
return t.url ? _.a(t.label, {
href: t.url
}) : t.label
}
var url = location.href;
var cls = {
0: "index-nav-frame-line",
1: "index-nav-frame-line-center",
2: "index-nav-frame-line-li"
}
var checkActive = function (t) {
if (url.indexOf(encodeURI(t.url)) >= 0) {
return true;
}
var children = t.children
if (children) {
for (var i = 0; i < children.length; i++) {
if (checkActive(children[i])) {
return true
}
}
}
}
var lines = menu.map(function (t, i) {
var children = t.children
var lineCenter = [];
if (children) {
var lis = children.map(function (t) {
if (t.hide) return null;
return _.div(genLink(t), {
class: cls["2"]
})
})
lineCenter = _.div(lis, {
class: cls["1"]
})
}
// var active = url.indexOf(t.url) >= 0 ? " active" : "";
var active = checkActive(t) ? " active" : "";
return _.div([genLink(t)].concat(lineCenter), {
class: cls["0"] + active,
tabindex: "-1"
}, {
click: function (e) {
_.queryAll("." + cls["0"]).forEach(function (t) {
t.className = cls["0"]
})
this.className = cls["0"] + " active"
}
})
})
var logo = _.div(_.img(logo), {
class: "nav-small",
style: _.inStyle(logo),
tabindex: "-1"
})
var info = _.div(info.text, {
class: "index-nav-info"
})
var navIndex = _.div([logo].concat(lines).concat([info]), {
class: "index-nav"
})
el.appendChild(navIndex)
}
return _.createClass(Nav, {
});
}();
var _websql = function () {
var Websql = function (options) {
if (!(this instanceof Websql)) return new Websql(options);
var options = this.options = _.extend({
dbname: "mydb",
version: "1.0",
desc: "teset db",
dbsize: 2 * 1024 * 1024
}, options)
            // open (or create) the database
var db = this.db = window.openDatabase(
options.dbname,
options.version,
options.desc,
options.dbsize
);
            // table definitions
this.tbls = options.tbls || [];
            // query log table
this.tbls.sys_log = [{
prop: "time",
                label: 'Time',
type: "date",
format: "yyyy-mm-dd hh:mm:ss"
},
{
prop: "sql",
label: 'SQL',
type: "sql"
},
{
prop: "duration",
                label: 'Duration (ms)',
type: "number"
}
]
            // seed data
this.data = options.data;
            // cached query results
this.rs = [];
//[{
// tbl:tbl
// sql:sql
// }]
this.sqls = [];
this.gridConfig = [{
label: "显示字段别名",
checked: true,
name: "label"
}, {
label: "显示序号列",
checked: true,
name: "seq"
},
{
label: "显示选择列",
checked: true,
name: "check"
},
{
label: "合计数字列",
checked: true,
name: "statistic"
},
{
label: "固定表头",
checked: false,
name: "fixedhead"
},
// {
            //     label: "Allow multi-table queries",
// checked: true,
// name: "showMultisql"
// }
]
}
return _.createClass(Websql, {
_: _,
nav: _nav,
createTbls: function (tbls) {
var tbls = tbls || this.tbls;
var _this = this;
_this.sqls = [];
this.db.transaction(function (tx) {
for (var t in tbls) {
var flds = tbls[t].map(function (t) {
return (t.prop ? t.prop : t) + (t.pk ? " unique" : "");
});
var sql = `CREATE TABLE IF NOT EXISTS ${t}(${flds})`
console.log(sql)
_this.sqls.push({
tbl: t,
sql: sql
})
// tx.executeSql(sql, [], function (ctx, result) {
// console.log("创建表成功 " + t);
// }, function (tx, error) {
                    //     console.error('failed to create table: ' + t + error.message);
// // throw new Error(error);
// _this.errorCall && _this.errorCall(error.message)
// })
}
_this.setSqlcmd.call(_this)
});
},
insert: function (tbl, rs, callback) {
var _this = this;
_this.sqls = [];
this.db.transaction(function (tx) {
var typs = [];
var flds = _this.tbls[tbl].map(function (t) {
typs.push(t.type)
return t.prop ? t.prop : t;
});
console.log(flds)
rs.forEach(function (r) {
// var sql = `INSERT INTO ${tbl}(${flds}) values(${new Array(flds.length).fill("?")})`;
var vs = flds.map(function (t, i) {
switch (typs[i]) {
// case "string":
// break;
case "number":
return r[t]
default:
return "'" + r[t] + "'"
}
});
_this.sqls.push({
tbl: tbl,
sql: `INSERT INTO ${tbl}(${flds}) values(${vs})`
})
// tx.executeSql(sql, vs, function (tx, result) {
// console.log("insert ok")
// // console.log(tx, result)
// }, function (tx, error) {
// console.log("insert fail")
// // console.log(error.message)
// // throw new Error(error);
// _this.errorCall && _this.errorCall(error.message)
// });
})
//callback && callback(tbl);
_this.setSqlcmd.call(_this)
});
},
empty: function (tbls, callback) {
var tbls = tbls == null || tbls.length == 0 ? this.tbls : tbls;
var _this = this;
_this.sqls = []
for (var t in tbls) {
_this.sqls.push({
sql: `DELETE FROM ${t}`,
tbl: t
})
}
// this.exe(_this.sqls, callback)
_this.setSqlcmd.call(_this)
// var del = function (tx, t) {
// var sql = `DELETE FROM ${t}`
// console.log(tx, sql)
// tx.executeSql(sql, [], function (ctx, result) {
// console.log("删除表成功 " + t);
// }, function (tx, error) {
            //         console.error('failed to clear table: ' + t + error.message);
// // throw new Error(error);
// _this.errorCall && _this.errorCall(error.message)
// })
// }
// this.db.transaction(function (tx) {
// for (var t in tbls) {
// del(tx, t)
// callback && callback(t)
// }
// });
},
del: function (tbl, ids) {
            var sql = `DELETE FROM ${tbl} WHERE rowid IN (${ids})`
this.exe({
sql: sql,
tbl: tbl
})
// this.db.transaction(function (tx) {
// console.log(sql)
// tx.executeSql(sql, [], function (ctx, result) {
// console.log("删除表成功 " + tbl);
// }, function (tx, error) {
            //         console.error('failed to delete from table: ' + tbl + error.message);
// })
// })
},
            drop: function (tbls) {
var tbls = tbls == null || tbls.length == 0 ? this.tbls : tbls;
var _this = this;
_this.sqls = []
for (var t in tbls) {
_this.sqls.push({
sql: `drop table ${t}`,
tbl: t
})
}
_this.setSqlcmd.call(_this)
},
            // query tables with optional filters
list: function (tbls, options, callback) {
var arr = ["WHERE 1=1"]
for (var key in options) {
if (key !== "orderby" && key !== "groupby") {
if (_.type(options[key]) === "number") {
arr[arr.length - 1] = key + "=" + options[key] + "";
} else {
arr[arr.length - 1] = key + "='" + options[key] + "'";
}
}
}
var condition = arr.join(" & ");
if (options.groupby) {
condition += " GROUP BY " + options.groupby
}
if (options.orderby) {
condition += " ORDER BY " + options.orderby
}
var tbls = tbls || [];
var _this = this;
_this.sqls = tbls.map(t => {
return {
tbl: t,
sql: `SELECT * FROM ${t} ${condition}`
}
})
this.exe(_this.sqls, callback)
_this.setSqlcmd.call(_this)
},
            // execute sql statements
exe: function (sql, callback, errorCall) {
var _this = this;
var store = function (tx, sql, tbl) {
// console.log(sql)
console.time(sql);
var timeStart = +new Date();
tx.executeSql(sql, [], function (tx, results) {
_this.sqls.forEach(function (t) {
if (t.tbl === tbl) {
t.sql = sql
}
})
_this.rs[tbl] = [];
for (var i = 0; i < results.rows.length; i++) {
_this.rs[tbl].push(results.rows.item(i));
}
var timeEnd = +new Date();
console.timeEnd(sql);
var duration = timeEnd - timeStart
if (tbl !== "sys_log")
_this.log(sql, duration)
callback && callback.call(_this, _this.rs[tbl], tbl);
}, function (tx, error) {
// console.error( error.message);
errorCall && errorCall(error.message)
});
}
this.db.transaction(function (tx) {
if (_.type(sql) === "array") {
sql.forEach(t => {
store(tx, t.sql, t.tbl)
})
} else {
store(tx, sql.sql, sql.tbl)
}
})
},
log: function (sql, duration) {
// var sql=`insert into`
var sqlLog = `INSERT INTO sys_log values(?,?,?)`;
var vals = [+new Date(), sql, duration]
this.db.transaction(function (tx) {
tx.executeSql(sqlLog, vals, function (tx, results) {
console.log(results)
}, function (tx, error) {
// console.error( error.message);
// errorCall && errorCall(error.message)
})
})
},
            // table-name navigation header
hd: function () {
var tbls = [];
var _this = this;
for (var tbl in _this.tbls) {
if (tbl.indexOf("sys_") === -1) {
tbls.push(tbl)
}
}
return _.wrap("ul", tbls.map(function (t) {
return _.wrap("li", _.wrap("i", "") +
_.wrap("div", t, {
class: "text"
})
)
}).join(""))
},
createHd: function () {
var tbls = [];
var _this = this;
for (var tbl in _this.tbls) {
if (tbl.indexOf("sys_") === -1) {
tbls.push(tbl)
}
}
return _.ul(tbls.map(function (t) {
return _.li([_.i(""),
_.div(t, {
class: "text"
})
])
}))
},
createSlide: function () {
var _this = this;
var hd = _.div(this.createHd(), {
class: "hd"
}, {
click: function (e) {
var el = e.target;
console.log(el)
var li = _.closest(el, "li")
var config = _this.getGridConfig();
if (li) {
var tname = li.innerText;
if (!config.multisql) {
_this.toggleHd(tname)
var actLis = _.queryAll(".slide .hd li[active]")
if (actLis) {
var tbls = []
actLis.forEach(function (t) {
tbls.push(t.innerText);
})
_this.createList(tbls)
}
} else {
_this.createList([tname])
// _this.activeHd(tname)
}
}
}
})
var bd = _.div("", {
class: "bd"
}, {
click: function (e) {
var el = e.target,
thead = _.closest(el, "thead"),
// table = _.closest(el, "table");
table = _.closest(el, ".dataintable");
if (thead) {
var tbody = _.query("tbody", table);
if (el.nodeName.toLowerCase() === "input" && el.getAttribute("type") === "checkbox") {
                        // select all
var inputs = _.queryAll("input[type='checkbox']", tbody);
inputs.forEach(function (t) {
t.checked = el.checked; //!t.checked;
})
} else {
                        // sorting
var td = _.closest(el, "th")
if (!td) return;
var prop = td.getAttribute("prop");
if (prop) {
var seq = td.getAttribute("seq")
seq = seq === "desc" ? "asc" : "desc"
_.queryAll("th[seq]", thead).forEach((t) => {
t.removeAttribute("seq")
})
td.setAttribute("seq", seq)
var tname = table.getAttribute("tablename");
var options = {
orderby: prop + " " + seq
}
_this.list([tname], options || {}, function (rs, tname) {
tbody.parentNode.replaceChild(_this.createGrid(tname, rs, options, "tbody"), tbody)
// _this.setSqlcmd.call(_this, tname)
});
}
}
} else {
var tr = _.closest(el, "tr")
if (!tr) return;
_.queryAll("tr[active]", table).forEach((t) => {
t.removeAttribute("active")
})
tr.setAttribute("active", "");
}
}
})
var container = _.div([hd, bd], {
class: "slide_container"
})
return _.div(container, {
class: "slide"
})
},
createBtns: function () {
            var btns = [{
                key: "createTbls",
                val: "Init"
            }, {
                key: "add",
                val: "Insert"
            }, {
                key: "empty",
                val: "Empty"
            }, {
                key: "list",
                val: "List"
            }, {
                key: "drop",
                val: "Drop"
            }, {
                key: "log",
                val: "Log"
            }].map((t) => {
return _.btn(t.val, {
class: t.key
})
})
var _this = this;
var btnGroup = _.div(btns, {
class: "btn-group"
}, {
click: function (e) {
var act = e.target.className.split(" ")[1]
console.log(e.target, act)
switch (act) {
case "list":
var tbls = [];
for (var tbl in _this.tbls) {
tbls.push(tbl)
}
_this.createList(tbls);
break;
case "add":
var tbls = []
_.queryAll(".dataintable").forEach(function (t) {
tbls.push(t.getAttribute("tablename"))
})
for (var tbl in _this.data) {
console.log(tbl)
// if(tbls.indexOf(tbl)>=0){
// }else{
// }
_this.insert(tbl, _this.data[tbl])
// _this.insert(tbl, data[tbl], tbls.indexOf(tbl) >= 0 ? _this.reflashList.bind(_this) : null)
}
break;
case "empty":
_this.empty([], _this.reflashList.bind(_this));
break;
case "del":
_this.del("SSF_ORDER_DETAILS", 1);
break;
case "log":
_this.createList(["sys_log"])
break;
default:
act && _this[act] && _this[act]();
}
}
})
return btnGroup
},
reflashList: function (tbl) {
// console.log(tbl);
// var activeLi = document.querySelector(".slide .hd li[active]");
// if (activeLi) {
// var tname = activeLi.innerText.trim();
// // if (tbl === tname)
// this.createList([tname]);
// }
var tbls = [];
_.queryAll(".dataintable").forEach(function (t) {
tbls.push(t.getAttribute("tablename"));
})
this.createList(tbls);
},
            // replaces showList: builds DOM nodes via createEle instead of string concatenation
createList: function (tbls, options) {
var bd = document.querySelector(".slide .bd")
console.log(bd)
var _this = this;
var tbls = tbls || []
bd.innerHTML = "";
_this.list(tbls, options || {}, function (rs, tname) {
bd.appendChild(_.li(_this.createGrid(tname, rs, options)))
// _this.setSqlcmd.call(_this)
});
},
            // highlight SQL keywords
hightlightSql: function (sql) {
var keys = ["select", "from", "where", "desc", "asc", "on", "delete", "values",
"if", "not", "EXISTS", "unique",
"insert\\s+into", "create\\s+table", "drop\\s+table",
"order\\s+by", "group\\s+by", "left\\s+join", "right\\s+join", "inner\\s+join"
]
var reg1 = new RegExp("(" + keys.join("|") + ")", "gi");
return sql.replace(reg1, function (t) {
return _.wrap("font", (t.toUpperCase()).replace(/\s+/, " "), {
class: "red"
})
}).replace(/;\s*/g, ";<br>")
},
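            // e.g. hightlightSql("select a from t order by a desc") uppercases SELECT, FROM,
            // ORDER BY and DESC and wraps each in a <font class="red"> tag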
setSqlcmd: function (tname) {
var sqlcmd = _.query(".sqlcmd .textarea")
var _this = this;
if (sqlcmd) {
// var tbls = this.getTbls()
if (sqlcmd.tagName.toLowerCase() === "textarea") {
sqlcmd.value = this.sqls.map(function (t) {
return t.sql
}).join(";\n")
// tbls.map(function (t) {
// return (_this.sqls[t] || "").trim()
// }).join(";\n")
} else { //contenteditable
// sqlcmd.innerHTML = tbls.map(function (t) {
// return _this.hightlightSql((_this.sqls[t] || "").trim())
// }).join(";<br>")
// sqlcmd.innerHTML = _this.hightlightSql(tbls.map(function (t) {
// return (_this.sqls[t] || "").trim()
// }).join(";"))
sqlcmd.innerHTML = _this.hightlightSql(this.sqls.map(function (t) {
return t.sql
}).join(";\n"))
}
// this.activeHd(tbls)
}
},
getTbls: function () {
var tbls = []
_.queryAll(".dataintable").forEach(function (t) {
tbls.push(t.getAttribute("tablename"))
})
return tbls
},
            // derive a default table definition from the result set
getTbl: function (rs, tname) {
var arr = _.obj2arr(rs[0]),
keys = arr.keys,
typs = arr.typs;
var tbl = this.tbls[tname.trim()] || [];
return keys.map(function (t, i) {
var fld = tbl.filter(function (f) {
return f.prop === t
})[0];
return {
prop: t,
label: fld && fld.label || t,
type: fld && fld.type || typs[i],
format: fld && fld.format || ""
}
})
// var tbl = this.tbls[tname] || this.getTbl(rs);
// tbl.forEach(function (t) {
// t.hide=_.type(rs[0][t.prop]) === "undefined"
// })
},
// build the table as DOM nodes so that events can be attached
createGrid: function (tname, rs, options, resultType) {
var _this = this;
var tbl = this.getTbl(rs, tname);
var config = _.extend(this.getGridConfig(), options)
if (!resultType && config.fixedhead) {
resultType = "fixedhead"
}
var _row = function (r, i) {
var arr = _.obj2arr(r),
vals = arr.vals;
var cell = vals.map(function (t, j) {
// compute by column type
switch (typs[j]) {
case "number":
var colIndex = j + offset;
// accumulate values across data rows
if (i > 0 && t) {
tfoot[colIndex] += parseFloat(t);
}
// last row: tidy up decimals
if (i === len) {
// keep one decimal place and strip trailing zeros
tfoot[colIndex] = parseFloat(tfoot[colIndex].toFixed(1))
}
break;
case "date":
t = [_.createEle("i", "", {
class: "date"
}), fmts[j] ? _time(t).format(fmts[j]) : t]
break;
}
return _.td(t, {
class: typs[j]
});
});
if (config.seq) cell.unshift(_.td(i === 0 ? "#" : i));
if (config.check) cell.unshift(_.td(_.checkbox()));
return _.tr(cell, {
rowid: i
})
}
// column types
var typs = [];
var fmts = [];
var offset = 0;
if (config.check) offset++;
if (config.seq) offset++;
var len = rs.length;
var showFoot = false;
// define column widths
var defWidth = (100 - 5 * offset) / tbl.length + "%"
var colgroup = tbl.map(function (t) {
return _.col("", {
// style: "width: " + (t.width ? t.width : "auto") + ";"
style: "width: " + (t.width ? t.width : defWidth) + ";"
})
})
if (config.seq) colgroup.unshift(_.col("", {
style: "width: 5%;"
}));
if (config.check) colgroup.unshift(_.col("", {
style: "width: 5%;"
}));
var orderby = config.orderby || ""
var thead =
tbl.map(function (t) {
var typ = t.type ? t.type : "string";
var fmt = t.format ? t.format : "";
var lable = t.label ? t.label : t;
var prop = t.prop ? t.prop : t;
var seq = "";
var hide = !!t.hide;
if (!config.label) lable = prop;
// sorting
if (prop === orderby.split(" ")[0]) {
seq = orderby.split(" ")[1] || "asc"
}
if (!hide) {
if (typ === "number" && config.statistic) showFoot = true;
typs.push(typ);
fmts.push(fmt);
}
return hide ? "" : _.th([_.div(lable, {
class: "text"
}), _.div("", {
class: "icon"
})], {
class: typ,
prop: prop,
seq: seq
});
});
if (config.seq) thead.unshift(_.th("#"));
if (config.check) thead.unshift(_.th(_.checkbox()));
var tfoot = new Array(offset).fill("").concat(typs).map(function (t, i) {
return i === 0 ? "合计" : t === "number" ? 0 : "";
});
var tbody =
len === 0 ? _.createEle("tr td", "未查到记录", {
colspan: tbl.length + offset
}) :
rs.map(function (r, i) {
return _row(r, i + 1)
});
tfoot = showFoot ? tfoot.map(function (t) {
return _.td(t, {
class: t === "" ? "string" : "number"
});
}) : "";
var optPanel = _.div([_.btn("删除", {}, {
click: function (e) {
var el = e.target,
table = _.closest(el, ".dataintable");
var cbs = _.queryAll("tbody input[type='checkbox']:checked", table);
var tablename = table.getAttribute("tablename")
// var sql=""
cbs.forEach(function (t) {
// sql=`delete from ${tablename} where id=`
var tr = _.closest(t, "tr")
var rowid = tr.getAttribute("rowid")
_this.sqls.push({
tbl: tbl,
sql: `delete from ${tablename} where rowid=${rowid}`
})
})
_this.setSqlcmd.call(_this)
}
}), _.btn("取消", {}, {
click: function (e) {
var el = e.target,
table = _.closest(el, ".dataintable");
var cbs = _.queryAll("input[type='checkbox']:checked", table);
cbs.forEach(function (t) {
t.checked = false;
})
optPanel.style.overflow = "hidden"
}
})], {
class: "optPanel"
})
switch (resultType) {
case "colgroup":
return _.colgroup(colgroup);
break;
case "thead":
return _.thead(thead);
break;
case "tbody":
return _.tbody(tbody);
break;
case "tfoot":
return _.tfoot(tfoot);
break;
case "fixedhead":
var cols = _.colgroup(colgroup)
return _.div([
_.div(_.table([cols, _.thead(thead)], {
// class: "dataintable",
tablename: tname
}), {
class: "table-fixed-head"
}),
_.div(_.table([cols.cloneNode(true), _.tbody(tbody), _.tfoot(tfoot)], {
// class: "dataintable",
tablename: tname
}), {
class: "table-fixed-body"
})
], {
class: "dataintable",
tablename: tname
})
break;
default:
return _.div([_.table([_.colgroup(colgroup), _.thead(thead), _.tbody(tbody), _.tfoot(tfoot)], {
tablename: tname
}), optPanel], {
class: "dataintable",
tablename: tname
});
}
},
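// Usage sketch (illustrative; "SSF_ORDER_DETAILS" and the rs callback follow
// the patterns used elsewhere in this file):
// _this.list(["SSF_ORDER_DETAILS"], {}, function (rs, tname) {
//     bd.appendChild(_.li(_this.createGrid(tname, rs, { seq: true, check: true })));
// });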
// build the table via string concatenation
grid: function (tname, rs, options) {
var tbl = this.tbls[tname];
var _row = function (r, i) {
var tag = "td",
arr = _.obj2arr(r),
vals = arr.vals;
var cell =
(options.check ? _.wrap(tag, checkbox) : "") +
(options.seq ? _.wrap(tag, i === 0 ? "#" : i) : "") +
vals.map(function (t, j) {
// compute by column type
switch (typs[j]) {
case "number":
var colIndex = j + offset;
// accumulate values across data rows
if (i > 0 && t) {
tfoot[colIndex] += parseFloat(t);
}
// last row: tidy up decimals
if (i === len) {
// keep one decimal place and strip trailing zeros
tfoot[colIndex] = parseFloat(tfoot[colIndex].toFixed(1))
}
break;
case "date":
t = _.wrap("i", "", {
class: "date"
}) + t
break;
}
return _.wrap("td", t, {
class: typs[j]
});
}).join("");
return _.wrap("tr", cell, {
rowid: i
})
}
// column types
var typs = [];
var offset = 0;
if (options.check) offset++;
if (options.seq) offset++;
var len = rs.length;
var checkbox = _.wrap("div", '<input type="checkbox"/>');
var thead = (options.check ? _.wrap("th", checkbox) : "") +
(options.seq ? _.wrap("th", "#") : "") +
tbl.map(function (t) {
var typ = t.type ? t.type : "string";
var lable = t.label ? t.label : t;
var prop = t.prop ? t.prop : t;
lable += _.wrap("div", "", {
class: "icon"
})
typs.push(typ);
return _.wrap("th", lable, {
class: typ,
prop: prop
});
}).join("");
var tfoot = new Array(offset).fill("").concat(typs).map(function (t, i) {
return i === 0 ? "合计" : t === "number" ? 0 : "";
});
var tbody =
len === 0 ? _.wrap("tr td", "未查到记录", {
colspan: tbl.length + offset
}) :
rs.map(function (r, i) {
return _row(r, i + 1)
}).join("");
tfoot = tfoot.map(function (t) {
return _.wrap("td", t, {
class: "number"
});
}).join("");
return _.wrap("table",
_.wrap("thead", thead) +
_.wrap("tbody", tbody) +
_.wrap("tfoot", tfoot), {
class: "dataintable",
tablename: tname
});
},
activeHd: function (tbl) {
var lis = _.queryAll(".slide .hd li")
var tbls = _.type(tbl) === "array" ? tbl : [tbl];
lis.forEach(function (t) {
if (tbls.indexOf(t.innerText) >= 0) {
t.setAttribute("active", "")
} else {
t.removeAttribute("active");
}
})
},
toggleHd: function (tbl) {
var lis = _.queryAll(".slide .hd li")
lis.forEach(function (t) {
if (t.innerText === tbl) {
t.hasAttribute("active") ? t.removeAttribute("active") : t.setAttribute("active", "")
}
})
},
// get the selected text
getSelectedText: function (inputDom) {
if (document.selection) //IE
{
return document.selection.createRange().text;
} else {
var val = this.getTextareaValue(inputDom)
return val.substring(inputDom.selectionStart,
inputDom.selectionEnd) || val;
}
},
getTextareaValue: function (inputDom) {
var val = "";
if (inputDom.tagName.toLowerCase() === "textarea") {
val = inputDom.value
} else {
val = inputDom.innerText
// contenteditable line-break quirk: in plain-text mode the browser
// inserts the characters U+000A and U+00A0, so normalize them to spaces
val = val.replace(/[\u000A|\u00A0]/g, function (t) {
return " "
})
}
return val
// for(var i=0 ;i<sql.length;i++){
// console.log(sql[i]+":"+sql.charCodeAt(i))
// }
},
// set the text selection
setTextSelected: function (inputDom, startIndex, endIndex) {
if (inputDom.setSelectionRange) {
inputDom.setSelectionRange(startIndex, endIndex);
} else if (inputDom.createTextRange) //IE
{
var range = inputDom.createTextRange();
range.collapse(true);
range.moveStart('character', startIndex);
range.moveEnd('character', endIndex - startIndex - 1);
range.select();
}
inputDom.focus();
},
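// Example (illustrative): highlight the first six characters of a plain
// <textarea>: this.setTextSelected(textarea, 0, 6)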
createSqlcmd: function () {
// var textarea = _.textarea("", {
// cols: "80",
// rows: "5",
// })
var _this = this;
var textarea = _.div("", {
contentEditable: "plaintext-only",
class: "textarea",
// placeholder: "enter sql here"
}, {
blur: function (e) { // SQL syntax highlighting
textarea.innerHTML =
_this.hightlightSql(_this.getTextareaValue(textarea))
}
})
var btn = _.btn("执行sql", {
class: "exesql"
}, {
click: function (e) {
var bd = document.querySelector(".slide .bd")
bd.innerHTML = "";
var options = {}
var sql = _this.getSelectedText(textarea) //textarea.value
if (!sql) {
bd.innerHTML = "请输入sql"
return;
}
var tnames = [];
sql.split(";").forEach(function (t) {
// query statement
if ((/select\s[\s\S]+from\s/i).test(t)) {
var tname = ((t.match(/from\s(\S+)\s?/i) || [])[1] || "sqlcmd").toUpperCase();
// console.log(tname)
_this.exe({
sql: t,
tbl: tname
}, function (rs, tbl) {
bd.appendChild(_.li(_this.createGrid(tbl, rs, options)))
tnames.push(tname)
_this.activeHd(tnames)
}, function (errormsg) {
bd.appendChild(document.createTextNode(errormsg))
_this.activeHd("")
})
} else {
// non-query statement
_this.exe({
sql: t,
tbl: ""
}, function () {
// console.log("ok")
// bd.appendChild(document.createTextNode("ok"))
bd.appendChild(_.div(t + ";", {
class: "sql"
}))
}, function (errormsg) {
bd.appendChild(document.createTextNode(errormsg))
})
}
})
}
})
var checkboxs = this.gridConfig.map(function (t) {
return _.checkbox(t)
})
var btnGroup = _.div([btn].concat(checkboxs), {
class: "sqlcmd-btn-group"
})
return _.div([textarea, btnGroup], {
class: "sqlcmd"
})
},
getGridConfig: function () {
var config = {}
this.gridConfig.forEach(function (t) {
var val = _.query("input[name='" + t.name + "']").checked
// var key = t.name.substring(4).toLowerCase();
var key = t.name.toLowerCase();
config[key] = val
})
return config
}
})
}();
if (!window.openDatabase) {
alert("当前环境不支持websql!请用谷歌浏览器试试!");
return;
}
return _websql
}) | t = |
api_op_CreateInsight.go | // Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package securityhub
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
"github.com/aws/aws-sdk-go-v2/private/protocol"
)
type CreateInsightInput struct {
_ struct{} `type:"structure"`
// One or more attributes used to filter the findings included in the insight.
// Only findings that match the criteria defined in the filters are included
// in the insight.
//
// Filters is a required field
Filters *AwsSecurityFindingFilters `type:"structure" required:"true"`
// The attribute used as the aggregator to group related findings for the insight.
//
// GroupByAttribute is a required field
GroupByAttribute *string `type:"string" required:"true"`
// The name of the custom insight to create.
//
// Name is a required field
Name *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateInsightInput) String() string {
return awsutil.Prettify(s)
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateInsightInput) Validate() error {
invalidParams := aws.ErrInvalidParams{Context: "CreateInsightInput"}
if s.Filters == nil {
invalidParams.Add(aws.NewErrParamRequired("Filters"))
}
if s.GroupByAttribute == nil {
invalidParams.Add(aws.NewErrParamRequired("GroupByAttribute"))
}
if s.Name == nil {
invalidParams.Add(aws.NewErrParamRequired("Name"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s CreateInsightInput) MarshalFields(e protocol.FieldEncoder) error {
e.SetValue(protocol.HeaderTarget, "Content-Type", protocol.StringValue("application/json"), protocol.Metadata{})
if s.Filters != nil |
if s.GroupByAttribute != nil {
v := *s.GroupByAttribute
metadata := protocol.Metadata{}
e.SetValue(protocol.BodyTarget, "GroupByAttribute", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
}
if s.Name != nil {
v := *s.Name
metadata := protocol.Metadata{}
e.SetValue(protocol.BodyTarget, "Name", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
}
return nil
}
type CreateInsightOutput struct {
_ struct{} `type:"structure"`
// The ARN of the insight created.
//
// InsightArn is a required field
InsightArn *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateInsightOutput) String() string {
return awsutil.Prettify(s)
}
// MarshalFields encodes the AWS API shape using the passed in protocol encoder.
func (s CreateInsightOutput) MarshalFields(e protocol.FieldEncoder) error {
if s.InsightArn != nil {
v := *s.InsightArn
metadata := protocol.Metadata{}
e.SetValue(protocol.BodyTarget, "InsightArn", protocol.QuotedValue{ValueMarshaler: protocol.StringValue(v)}, metadata)
}
return nil
}
const opCreateInsight = "CreateInsight"
// CreateInsightRequest returns a request value for making API operation for
// AWS SecurityHub.
//
// Creates a custom insight in Security Hub. An insight is a consolidation of
// findings that relate to a security issue that requires attention or remediation.
// Use the GroupByAttribute to group the related findings in the insight.
//
// // Example sending a request using CreateInsightRequest.
// req := client.CreateInsightRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/securityhub-2018-10-26/CreateInsight
func (c *Client) CreateInsightRequest(input *CreateInsightInput) CreateInsightRequest {
op := &aws.Operation{
Name: opCreateInsight,
HTTPMethod: "POST",
HTTPPath: "/insights",
}
if input == nil {
input = &CreateInsightInput{}
}
req := c.newRequest(op, input, &CreateInsightOutput{})
return CreateInsightRequest{Request: req, Input: input, Copy: c.CreateInsightRequest}
}
// CreateInsightRequest is the request type for the
// CreateInsight API operation.
type CreateInsightRequest struct {
*aws.Request
Input *CreateInsightInput
Copy func(*CreateInsightInput) CreateInsightRequest
}
// Send marshals and sends the CreateInsight API request.
func (r CreateInsightRequest) Send(ctx context.Context) (*CreateInsightResponse, error) {
r.Request.SetContext(ctx)
err := r.Request.Send()
if err != nil {
return nil, err
}
resp := &CreateInsightResponse{
CreateInsightOutput: r.Request.Data.(*CreateInsightOutput),
response: &aws.Response{Request: r.Request},
}
return resp, nil
}
// CreateInsightResponse is the response type for the
// CreateInsight API operation.
type CreateInsightResponse struct {
*CreateInsightOutput
response *aws.Response
}
// SDKResponseMetdata returns the response metadata for the
// CreateInsight request.
func (r *CreateInsightResponse) SDKResponseMetdata() *aws.Response {
return r.response
}
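// Usage sketch (illustrative, not generated code; the filter and names are
// placeholder values):
//
//	input := &CreateInsightInput{
//		Filters:          &AwsSecurityFindingFilters{},
//		GroupByAttribute: aws.String("ResourceId"),
//		Name:             aws.String("example-insight"),
//	}
//	req := client.CreateInsightRequest(input)
//	resp, err := req.Send(context.TODO())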
| {
v := s.Filters
metadata := protocol.Metadata{}
e.SetFields(protocol.BodyTarget, "Filters", v, metadata)
} |
products.controller.ts | import { Controller, Post, Body, Get, Param } from '@nestjs/common';
import { ProductsService } from './products.service';
@Controller('products')
export class ProductsController {
constructor(private readonly productsService: ProductsService) {}
| addProduct(
@Body('title') prodTitle: string,
@Body('description') prodDesc: string,
@Body('price') prodPrice: number,
): any {
const generatedId = this.productsService.insertProduct(
prodTitle,
prodDesc,
prodPrice,
);
return { id: generatedId };
}
@Get()
getAllProducts(): any {
return this.productsService.getProducts();
}
@Get(':prodId')
getProduct(@Param('prodId') prodId: string) {
return this.getProduct(prodId);
}
} | @Post() |
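// Usage sketch (illustrative; assumes the app is served on localhost:3000):
// POST /products  with body { "title": "Shirt", "description": "Cotton", "price": 9.99 }
// GET  /products        -> all products
// GET  /products/<id>   -> one product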
index.js | /**
* @author Charlie Calvert
*/
function myTests() {
'use strict';
test('MyFirstTest', function() {
ok(true);
});
test('MySecondTest', function() {
ok(1 === 1); | var app = new App();
var result = app.add(2, 3);
var expected = 5;
equal(result, expected);
});
test('TestAdd2', function() {
var app = new App();
var result = app.add(2, 3);
var expected = 5;
ok(result === expected, 'result for now: ' + result);
});
test('TestMultiply', function() {
var app = new App();
var result = app.multiply(2, 3);
var expected = 6;
equal(result, expected, 'result for now: ' + result);
});
/* asyncTest('readJson', function() {
var app = new App();
app.readJsonTest(function(data) {
equal(data.Result, 'Success');
start();
}, function(request, ajaxOptions, thrownError) {
ok(false, 'call to readJson failed: ' + request.responseText);
start();
});
}); */
test('readJson', function() {
$.ajax = function(options) {
equal(options.url, "MyData.json");
options.success({
"Result": "Success"
});
};
var app = new App();
app.readJsonTest(function(data) {
equal(data.Result, 'Success');
//start();
}, function(request, ajaxOptions, thrownError) {
ok(false, 'call to readJson failed: ' + request.responseText);
//start();
});
});
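// The test above swaps out jQuery's real $.ajax for a stub; a fuller sketch
// (not in the original) would restore it afterwards:
// var realAjax = $.ajax;
// $.ajax = function (options) { options.success({ "Result": "Success" }); };
// // ... assertions ...
// $.ajax = realAjax;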
}
$(document).ready(function() {
'use strict';
myTests();
}); | });
test('TestAdd', function() { |
node.go | package daemon
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"github.com/golang/glog"
"github.com/openshift/machine-config-operator/pkg/daemon/constants"
corev1 "k8s.io/api/core/v1"
)
func (dn *Daemon) loadNodeAnnotations(node *corev1.Node) (*corev1.Node, error) {
ccAnnotation, err := getNodeAnnotation(node, constants.CurrentMachineConfigAnnotationKey)
// We need to load the annotations from the file only on the
// first run; the initial annotations do not need to be set if
// the node already has annotations.
if err == nil && ccAnnotation != "" {
return node, nil
}
glog.Infof("No %s annotation on node %s: %v, in cluster bootstrap, loading initial node annotation from %s", constants.CurrentMachineConfigAnnotationKey, node.Name, node.Annotations, constants.InitialNodeAnnotationsFilePath)
d, err := ioutil.ReadFile(constants.InitialNodeAnnotationsFilePath)
if err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("failed to read initial annotations from %q: %w", constants.InitialNodeAnnotationsFilePath, err)
}
if os.IsNotExist(err) |
var initial map[string]string
if err := json.Unmarshal(d, &initial); err != nil {
return nil, fmt.Errorf("failed to unmarshal initial annotations: %w", err)
}
glog.Infof("Setting initial node config: %s", initial[constants.CurrentMachineConfigAnnotationKey])
n, err := setNodeAnnotations(dn.kubeClient.CoreV1().Nodes(), dn.nodeLister, node.Name, initial)
if err != nil {
return nil, fmt.Errorf("failed to set initial annotations: %w", err)
}
return n, nil
}
// getNodeAnnotation gets the node annotation, unsurprisingly
func getNodeAnnotation(node *corev1.Node, k string) (string, error) {
return getNodeAnnotationExt(node, k, false)
}
// getNodeAnnotationExt is like getNodeAnnotation, but allows one to customize ENOENT handling
func getNodeAnnotationExt(node *corev1.Node, k string, allowNoent bool) (string, error) {
v, ok := node.Annotations[k]
if !ok {
if !allowNoent {
return "", fmt.Errorf("%s annotation not found on node '%s'", k, node.Name)
}
return "", nil
}
return v, nil
}
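// Usage sketch (illustrative): read the current machine-config annotation
// while tolerating its absence:
//
//	v, err := getNodeAnnotationExt(node, constants.CurrentMachineConfigAnnotationKey, true)
//	if err == nil && v == "" {
//		// annotation not set yet
//	}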
| {
// try currentConfig if, for whatever reason we lost annotations? this is super best effort.
currentOnDisk, err := dn.getCurrentConfigOnDisk()
if err == nil {
glog.Infof("Setting initial node config based on current configuration on disk: %s", currentOnDisk.GetName())
return setNodeAnnotations(dn.kubeClient.CoreV1().Nodes(), dn.nodeLister, node.Name, map[string]string{constants.CurrentMachineConfigAnnotationKey: currentOnDisk.GetName()})
}
return nil, err
} |
solver_benchmarks.rs | use criterion::{black_box, criterion_group, criterion_main, Criterion};
use sudoku_solver::*;
fn hard_solvable_puzzle(c: &mut Criterion) {
let board = Board::from(&[
[0, 2, 0, 0, 0, 0, 0, 0, 0], // row 1
[0, 0, 0, 6, 0, 0, 0, 0, 3], // row 2
[0, 7, 4, 0, 8, 0, 0, 0, 0], // row 3
[0, 0, 0, 0, 0, 3, 0, 0, 2], // row 4
[0, 8, 0, 0, 4, 0, 0, 1, 0], // row 5
[6, 0, 0, 5, 0, 0, 0, 0, 0], // row 6
[0, 0, 0, 0, 1, 0, 7, 8, 0], // row 7
[5, 0, 0, 0, 0, 9, 0, 0, 0], // row 8
[0, 0, 0, 0, 0, 0, 0, 4, 0], // row 9
]);
c.bench_function("hard solvable puzzle", |b| {
b.iter(|| solve(black_box(&board)))
});
}
fn | (c: &mut Criterion) {
let board = Board::from(&[
[0, 2, 0, 0, 0, 0, 0, 0, 0], // row 1
[0, 0, 0, 6, 0, 0, 0, 0, 3], // row 2
[0, 7, 4, 0, 8, 0, 0, 0, 0], // row 3
[0, 0, 0, 0, 0, 3, 0, 0, 2], // row 4
[0, 8, 0, 0, 4, 0, 0, 1, 0], // row 5
[6, 0, 0, 5, 0, 0, 0, 0, 0], // row 6
[0, 0, 0, 0, 1, 0, 7, 8, 0], // row 7
[3, 0, 0, 0, 0, 9, 0, 0, 0], // row 8
[0, 0, 0, 0, 0, 0, 0, 4, 0], // row 9
]);
c.bench_function("hard unsolvable puzzle", |b| {
b.iter(|| solve(black_box(&board)))
});
}
criterion_group!(benches, hard_solvable_puzzle, hard_unsolvable_puzzle);
criterion_main!(benches);
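// To run these benchmarks (assuming Criterion is declared as a [[bench]]
// target with `harness = false` in Cargo.toml): cargo bench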
| hard_unsolvable_puzzle |
genesis.go | package simulation
// DONTCOVER
import (
"encoding/json"
"fmt"
"math/rand"
"github.com/puneetsingh166/tm-load-test/types/module"
"github.com/puneetsingh166/tm-load-test/x/capability/types"
)
// Simulation parameter constants
const index = "index"
// GenIndex returns a random global index between 1-1000
func GenIndex(r *rand.Rand) uint64 {
return uint64(r.Int63n(1000)) + 1
}
// RandomizedGenState generates a random GenesisState for capability
func | (simState *module.SimulationState) {
var idx uint64
simState.AppParams.GetOrGenerate(
simState.Cdc, index, &idx, simState.Rand,
func(r *rand.Rand) { idx = GenIndex(r) },
)
capabilityGenesis := types.GenesisState{Index: idx}
bz, err := json.MarshalIndent(&capabilityGenesis, "", " ")
if err != nil {
panic(err)
}
fmt.Printf("Selected randomly generated %s parameters:\n%s\n", types.ModuleName, bz)
simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(&capabilityGenesis)
}
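// Usage sketch (illustrative): GenIndex only needs a seeded source:
//
//	r := rand.New(rand.NewSource(1))
//	idx := GenIndex(r) // always in [1, 1000]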
| RandomizedGenState |
party.py | """ Method for data manipulation in the mock db """
from flask import jsonify
from marshmallow import ValidationError
from app.api.model.party import Party
from app.api.util.dto import party_schema, parties_schema
from app.api.db.database import AppDatabase as db
def save_new_party(json_data):
"""saves a new party in the database
Args:
json_data (json) : party details
Returns:
json : api endpoint response
"""
# Deserialize the data input against the party schema
# check if input values throw validation errors
try:
data = party_schema.load(json_data)
except ValidationError as e:
return jsonify({
"status": 400,
"error": e.messages
}), 400
party_name = data['party_name']
hq_address = data['hq_address']
logo_url = data['logo_url']
# Query database for party by name
party_by_name = Party.get_party_by_name(party_name)
party = db().get_single_row(*party_by_name)
if party is None:
# if name is not taken
new_party = Party(
party_name=party_name,
hq_address=hq_address,
logo_url=logo_url
)
save_changes(new_party)
# 1. serialize the input for response
# 2. return serialized and proper format json to api endpoint
party_saved = db().get_single_row(*party_by_name)
response = party_schema.dump(party_saved)
response_object = jsonify({
"status": 201,
"data": [response]
})
return response_object, 201
# default response when name is taken
return jsonify({
"status": 409,
"error": "Try a different Party name, Provided name is taken."
}), 409
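# Example request payload for save_new_party (illustrative values):
# {
#     "party_name": "Unity Party",
#     "hq_address": "12 Example Street",
#     "logo_url": "https://example.com/logo.png"
# }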
def get_party(_id):
"""Method to return the party from the database with the provided id
Args:
_id (integer): the party unique identifier
Returns:
1. json : the party found details in json format
2. json : error if the party is not found
"""
party_query = Party.get_party_by_id(_id)
party = db().get_single_row(*party_query)
if party:
# response when party exists
return jsonify({
"status": 200,
"data": [party_schema.dump(party)]
}), 200
else:
# response when party not found
return jsonify({
"status": 404,
"error": "Resource /parties/{} not found".format(_id)
}), 404
def get_parties():
"""Method to return all the parties from the database
Returns:
1. json : the parties found details in json format
"""
parties_query = Party.get_parties_query()
parties = db().get_all_rows(parties_query)
response = parties_schema.dump(parties)
return jsonify({
"status": 200,
"data": response
}), 200
def | (_id, json_data):
""" Method to apply new changes to party details """
try:
data = party_schema.load(json_data, partial=True)
except ValidationError as e:
return jsonify({
"status": 400,
"error": e.messages
}), 400
# check if the party with the provided party id exists.
party_to_edit_query = Party.get_party_by_id(_id)
party_to_edit = db().get_single_row(*party_to_edit_query)
if party_to_edit:
new_name = data["party_name"]
# check if the provided name already exists
party_by_name = Party.get_party_by_name(new_name)
party = db().get_single_row(*party_by_name)
if party is None:
# construct update party name query
query, values = Party.update_party(_id, new_name)
# persist changes to the database
db().commit_changes(query, values)
# query back the database for the edited party.
party_edited = db().get_single_row(*party_to_edit_query)
return jsonify({
"status": 200,
"data": [party_schema.dump(party_edited)]
})
# if party name is already registered
return jsonify({
"status": 409,
"error":
"Provided name is already taken or is the same for this party."
}), 409
# response when party not found
return jsonify({
"status": 404,
"error": "Resource requested for edit not found."
}), 404
def delete_party(_id):
"""delete the selected party
Returns:
1. json : response message or details in json format
"""
# check if party to delete exists
party_to_delete_query = Party.get_party_by_id(_id)
party_to_delete = db().get_single_row(*party_to_delete_query)
if party_to_delete:
# delete found party
query, value = Party.delete_party(_id)
db().commit_changes(query, value)
return jsonify({
"status": 200,
"data": [{
"message": "Political Party deleted successfully."
}]
}), 200
else:
# response message when delete fails.
return jsonify({
"status": 404,
"data": [{
"message": "Political Party to delete not found."
}]
}), 404
def save_changes(data):
""" Write to the mock db """
query, values = Party.add_party(data)
db().commit_changes(query, values)
| edit_party |
predator_prey_env.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simulate a predator prey environment.
Each agent can just observe itself (its own identity), i.e. s_j = j, and a vision square around it.
Design Decisions:
- Memory cheaper than time (compute)
- Using Vocab for class of box:
-1 out of bound,
indexing for predator agent (from 2?)
??? for prey agent (1 for fixed case, for now)
- Action Space & Observation Space are according to an agent
- Rewards -0.05 at each time step till the time
- Episode never ends
- Obs. State: Vocab of 1-hot < predator, preys & units >
"""
# core modules
import random
import math
import curses
# 3rd party modules | import numpy as np
from gym import spaces
class PredatorPreyEnv(gym.Env):
# metadata = {'render.modes': ['human']}
def __init__(self,):
self.__version__ = "0.0.1"
# TODO: better config handling
self.OUTSIDE_CLASS = 1
self.PREY_CLASS = 2
self.PREDATOR_CLASS = 3
self.TIMESTEP_PENALTY = -0.05
self.PREY_REWARD = 0
self.POS_PREY_REWARD = 0.05
self.episode_over = False
def init_curses(self):
self.stdscr = curses.initscr()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_YELLOW, -1)
curses.init_pair(3, curses.COLOR_CYAN, -1)
curses.init_pair(4, curses.COLOR_GREEN, -1)
def init_args(self, parser):
env = parser.add_argument_group('Prey Predator task')
env.add_argument('--nenemies', type=int, default=1,
help="Total number of preys in play")
env.add_argument('--dim', type=int, default=5,
help="Dimension of box")
env.add_argument('--vision', type=int, default=2,
help="Vision of predator")
env.add_argument('--moving_prey', action="store_true", default=False,
help="Whether prey is fixed or moving")
env.add_argument('--no_stay', action="store_true", default=False,
help="Whether predators have an action to stay in place")
parser.add_argument('--mode', default='mixed', type=str,
help='cooperative|competitive|mixed (default: mixed)')
env.add_argument('--enemy_comm', action="store_true", default=False,
help="Whether prey can communicate.")
def multi_agent_init(self, args):
# General variables defining the environment : CONFIG
params = ['dim', 'vision', 'moving_prey', 'mode', 'enemy_comm']
for key in params:
setattr(self, key, getattr(args, key))
self.nprey = args.nenemies
self.npredator = args.nfriendly
self.dims = dims = (self.dim, self.dim)
self.stay = not args.no_stay
if args.moving_prey:
raise NotImplementedError
# TODO
# (0: UP, 1: RIGHT, 2: DOWN, 3: LEFT, 4: STAY)
# Define what an agent can do -
if self.stay:
self.naction = 5
else:
self.naction = 4
self.action_space = spaces.MultiDiscrete([self.naction])
self.BASE = (dims[0] * dims[1])
self.OUTSIDE_CLASS += self.BASE
self.PREY_CLASS += self.BASE
self.PREDATOR_CLASS += self.BASE
# Setting max vocab size for 1-hot encoding
self.vocab_size = 1 + 1 + self.BASE + 1 + 1
# predator + prey + grid + outside
# Observation for each agent will be vision * vision ndarray
self.observation_space = spaces.Box(low=0, high=1, shape=(self.vocab_size, (2 * self.vision) + 1, (2 * self.vision) + 1), dtype=int)
# Actual observation will be of the shape 1 * npredator * (2v+1) * (2v+1) * vocab_size
return
def step(self, action):
"""
The agents take a step in the environment.
Parameters
----------
action : list/ndarray of length m, containing the index of the action chosen by each of the m agents.
Returns
-------
obs, reward, episode_over, info : tuple
obs (object) : each agent's observation after the step.
reward (ndarray) : per-agent reward for this timestep.
episode_over (bool) : True once every predator has reached the prey (in mixed mode).
info (dict) : diagnostic information useful for debugging.
"""
if self.episode_over:
raise RuntimeError("Episode is done")
action = np.array(action).squeeze()
action = np.atleast_1d(action)
for i, a in enumerate(action):
self._take_action(i, a)
assert np.all(action < self.naction), "Actions should be in the range [0,naction)."
self.episode_over = False
self.obs = self._get_obs()
debug = {'predator_locs':self.predator_loc,'prey_locs':self.prey_loc}
return self.obs, self._get_reward(), self.episode_over, debug
def reset(self):
"""
Reset the state of the environment and returns an initial observation.
Returns
-------
observation (object): the initial observation of the space.
"""
self.episode_over = False
self.reached_prey = np.zeros(self.npredator)
# Locations
locs = self._get_cordinates()
self.predator_loc, self.prey_loc = locs[:self.npredator], locs[self.npredator:]
self._set_grid()
# stat - like success ratio
self.stat = dict()
# Observation will be npredator * vision * vision ndarray
self.obs = self._get_obs()
return self.obs
def seed(self):
return
def _get_cordinates(self):
idx = np.random.choice(np.prod(self.dims),(self.npredator + self.nprey), replace=False)
return np.vstack(np.unravel_index(idx, self.dims)).T
def _set_grid(self):
self.grid = np.arange(self.BASE).reshape(self.dims)
# Mark agents in grid
# self.grid[self.predator_loc[:,0], self.predator_loc[:,1]] = self.predator_ids
# self.grid[self.prey_loc[:,0], self.prey_loc[:,1]] = self.prey_ids
# Padding for vision
self.grid = np.pad(self.grid, self.vision, 'constant', constant_values = self.OUTSIDE_CLASS)
self.empty_bool_base_grid = self._onehot_initialization(self.grid)
def _get_obs(self):
self.bool_base_grid = self.empty_bool_base_grid.copy()
for i, p in enumerate(self.predator_loc):
self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREDATOR_CLASS] += 1
for i, p in enumerate(self.prey_loc):
self.bool_base_grid[p[0] + self.vision, p[1] + self.vision, self.PREY_CLASS] += 1
obs = []
for p in self.predator_loc:
slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)
slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)
obs.append(self.bool_base_grid[slice_y, slice_x])
if self.enemy_comm:
for p in self.prey_loc:
slice_y = slice(p[0], p[0] + (2 * self.vision) + 1)
slice_x = slice(p[1], p[1] + (2 * self.vision) + 1)
obs.append(self.bool_base_grid[slice_y, slice_x])
obs = np.stack(obs)
return obs
def _take_action(self, idx, act):
# prey action
if idx >= self.npredator:
# fixed prey
if not self.moving_prey:
return
else:
raise NotImplementedError
if self.reached_prey[idx] == 1:
return
# STAY action
if act==4: # STAY (action indices run 0-4, per the mapping above)
return
# UP
if act==0 and self.grid[max(0,
self.predator_loc[idx][0] + self.vision - 1),
self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:
self.predator_loc[idx][0] = max(0, self.predator_loc[idx][0]-1)
# RIGHT
elif act==1 and self.grid[self.predator_loc[idx][0] + self.vision,
min(self.dims[1] -1,
self.predator_loc[idx][1] + self.vision + 1)] != self.OUTSIDE_CLASS:
self.predator_loc[idx][1] = min(self.dims[1]-1,
self.predator_loc[idx][1]+1)
# DOWN
elif act==2 and self.grid[min(self.dims[0]-1,
self.predator_loc[idx][0] + self.vision + 1),
self.predator_loc[idx][1] + self.vision] != self.OUTSIDE_CLASS:
self.predator_loc[idx][0] = min(self.dims[0]-1,
self.predator_loc[idx][0]+1)
# LEFT
elif act==3 and self.grid[self.predator_loc[idx][0] + self.vision,
max(0,
self.predator_loc[idx][1] + self.vision - 1)] != self.OUTSIDE_CLASS:
self.predator_loc[idx][1] = max(0, self.predator_loc[idx][1]-1)
def _get_reward(self):
n = self.npredator if not self.enemy_comm else self.npredator + self.nprey
reward = np.full(n, self.TIMESTEP_PENALTY)
on_prey = np.where(np.all(self.predator_loc == self.prey_loc,axis=1))[0]
nb_predator_on_prey = on_prey.size
if self.mode == 'cooperative':
reward[on_prey] = self.POS_PREY_REWARD * nb_predator_on_prey
elif self.mode == 'competitive':
if nb_predator_on_prey:
reward[on_prey] = self.POS_PREY_REWARD / nb_predator_on_prey
elif self.mode == 'mixed':
reward[on_prey] = self.PREY_REWARD
else:
raise RuntimeError("Incorrect mode, Available modes: [cooperative|competitive|mixed]")
self.reached_prey[on_prey] = 1
if np.all(self.reached_prey == 1) and self.mode == 'mixed':
self.episode_over = True
# Prey reward
if nb_predator_on_prey == 0:
reward[self.npredator:] = -1 * self.TIMESTEP_PENALTY
else:
# TODO: discuss & finalise
reward[self.npredator:] = 0
# Success ratio
if self.mode != 'competitive':
if nb_predator_on_prey == self.npredator:
self.stat['success'] = 1
else:
self.stat['success'] = 0
return reward
def reward_terminal(self):
return np.zeros_like(self._get_reward())
def _onehot_initialization(self, a):
ncols = self.vocab_size
out = np.zeros(a.shape + (ncols,), dtype=int)
out[self._all_idx(a, axis=2)] = 1
return out
def _all_idx(self, idx, axis):
grid = np.ogrid[tuple(map(slice, idx.shape))]
grid.insert(axis, idx)
return tuple(grid)
def render(self, mode='human', close=False):
grid = np.zeros(self.BASE, dtype=object).reshape(self.dims)
self.stdscr.clear()
for p in self.predator_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'X'
else:
grid[p[0]][p[1]] = 'X'
for p in self.prey_loc:
if grid[p[0]][p[1]] != 0:
grid[p[0]][p[1]] = str(grid[p[0]][p[1]]) + 'P'
else:
grid[p[0]][p[1]] = 'P'
for row_num, row in enumerate(grid):
for idx, item in enumerate(row):
if item != 0:
if 'X' in item and 'P' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(3))
elif 'X' in item:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(1))
else:
self.stdscr.addstr(row_num, idx * 4, item.center(3), curses.color_pair(2))
else:
self.stdscr.addstr(row_num, idx * 4, '0'.center(3), curses.color_pair(4))
self.stdscr.addstr(len(grid), 0, '\n')
self.stdscr.refresh()
def exit_render(self):
curses.endwin() | import gym |
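# Usage sketch (illustrative; assumes an argparse parser that also supplies
# args.nfriendly, which multi_agent_init reads):
# env = PredatorPreyEnv()
# env.init_args(parser)
# env.multi_agent_init(parser.parse_args())
# obs = env.reset()
# obs, reward, done, info = env.step([0] * env.npredator)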
error.rs | use super::{AllocId, ConstAlloc, Pointer, Scalar};
use crate::mir::interpret::ConstValue;
use crate::ty::{layout, query::TyCtxtAt, tls, FnSig, Ty};
use rustc_data_structures::sync::Lock;
use rustc_errors::{pluralize, struct_span_err, DiagnosticBuilder, ErrorReported}; | use std::{any::Any, backtrace::Backtrace, fmt};
#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
pub enum ErrorHandled {
/// Already reported an error for this evaluation, and the compilation is
/// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
Reported(ErrorReported),
/// Already emitted a lint for this evaluation.
Linted,
/// Don't emit an error, the evaluation failed because the MIR was generic
/// and the substs didn't fully monomorphize it.
TooGeneric,
}
impl From<ErrorReported> for ErrorHandled {
fn from(err: ErrorReported) -> ErrorHandled {
ErrorHandled::Reported(err)
}
}
TrivialTypeFoldableAndLiftImpls! {
ErrorHandled,
}
pub type EvalToAllocationRawResult<'tcx> = Result<ConstAlloc<'tcx>, ErrorHandled>;
pub type EvalToConstValueResult<'tcx> = Result<ConstValue<'tcx>, ErrorHandled>;
pub fn struct_error<'tcx>(tcx: TyCtxtAt<'tcx>, msg: &str) -> DiagnosticBuilder<'tcx> {
struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(InterpErrorInfo<'_>, 8);
/// Packages the kind of error we got from the const code interpreter
/// up with a Rust-level backtrace of where the error occurred.
/// These should always be constructed by calling `.into()` on
/// a `InterpError`. In `rustc_mir::interpret`, we have `throw_err_*`
/// macros for this.
#[derive(Debug)]
pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
#[derive(Debug)]
struct InterpErrorInfoInner<'tcx> {
kind: InterpError<'tcx>,
backtrace: Option<Box<Backtrace>>,
}
impl fmt::Display for InterpErrorInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0.kind)
}
}
impl InterpErrorInfo<'tcx> {
pub fn print_backtrace(&self) {
if let Some(backtrace) = self.0.backtrace.as_ref() {
print_backtrace(backtrace);
}
}
pub fn into_kind(self) -> InterpError<'tcx> {
let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
kind
}
#[inline]
pub fn kind(&self) -> &InterpError<'tcx> {
&self.0.kind
}
}
fn print_backtrace(backtrace: &Backtrace) {
eprintln!("\n\nAn error occurred in miri:\n{}", backtrace);
}
impl From<ErrorHandled> for InterpErrorInfo<'_> {
fn from(err: ErrorHandled) -> Self {
match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
err_inval!(ReferencedConstant)
}
ErrorHandled::TooGeneric => err_inval!(TooGeneric),
}
.into()
}
}
impl From<ErrorReported> for InterpErrorInfo<'_> {
fn from(err: ErrorReported) -> Self {
InterpError::InvalidProgram(InvalidProgramInfo::AlreadyReported(err)).into()
}
}
impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
fn from(kind: InterpError<'tcx>) -> Self {
let capture_backtrace = tls::with_opt(|tcx| {
if let Some(tcx) = tcx {
*Lock::borrow(&tcx.sess.ctfe_backtrace)
} else {
CtfeBacktrace::Disabled
}
});
let backtrace = match capture_backtrace {
CtfeBacktrace::Disabled => None,
CtfeBacktrace::Capture => Some(Box::new(Backtrace::force_capture())),
CtfeBacktrace::Immediate => {
// Print it now.
let backtrace = Backtrace::force_capture();
print_backtrace(&backtrace);
None
}
};
InterpErrorInfo(Box::new(InterpErrorInfoInner { kind, backtrace }))
}
}
/// Error information for when the program we executed turned out not to actually be a valid
/// program. This cannot happen in stand-alone Miri, but it can happen during CTFE/ConstProp
/// where we work on generic code or execution does not have all information available.
pub enum InvalidProgramInfo<'tcx> {
/// Resolution can fail if we are in a too generic context.
TooGeneric,
/// Cannot compute this constant because it depends on another one
/// which already produced an error.
ReferencedConstant,
/// Abort in case errors are already reported.
AlreadyReported(ErrorReported),
/// An error occurred during layout computation.
Layout(layout::LayoutError<'tcx>),
/// An invalid transmute happened.
TransmuteSizeDiff(Ty<'tcx>, Ty<'tcx>),
/// SizeOf of unsized type was requested.
SizeOfUnsizedType(Ty<'tcx>),
}
impl fmt::Display for InvalidProgramInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use InvalidProgramInfo::*;
match self {
TooGeneric => write!(f, "encountered overly generic constant"),
ReferencedConstant => write!(f, "referenced constant has errors"),
AlreadyReported(ErrorReported) => {
write!(f, "encountered constants with type errors, stopping evaluation")
}
Layout(ref err) => write!(f, "{}", err),
TransmuteSizeDiff(from_ty, to_ty) => write!(
f,
"transmuting `{}` to `{}` is not possible, because these types do not have the same size",
from_ty, to_ty
),
SizeOfUnsizedType(ty) => write!(f, "size_of called on unsized type `{}`", ty),
}
}
}
/// Details of why a pointer had to be in-bounds.
#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
pub enum CheckInAllocMsg {
/// We are accessing memory.
MemoryAccessTest,
/// We are doing pointer arithmetic.
PointerArithmeticTest,
/// None of the above -- generic/unspecific inbounds test.
InboundsTest,
}
impl fmt::Display for CheckInAllocMsg {
/// When this is printed as an error the context looks like this
/// "{msg}pointer must be in-bounds at offset..."
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}",
match *self {
CheckInAllocMsg::MemoryAccessTest => "memory access failed: ",
CheckInAllocMsg::PointerArithmeticTest => "pointer arithmetic failed: ",
CheckInAllocMsg::InboundsTest => "",
}
)
}
}
/// Details of an access to uninitialized bytes where it is not allowed.
#[derive(Debug)]
pub struct UninitBytesAccess {
/// Location of the original memory access.
pub access_offset: Size,
/// Size of the original memory access.
pub access_size: Size,
/// Location of the first uninitialized byte that was accessed.
pub uninit_offset: Size,
/// Number of consecutive uninitialized bytes that were accessed.
pub uninit_size: Size,
}
/// Error information for when the program caused Undefined Behavior.
pub enum UndefinedBehaviorInfo<'tcx> {
/// Free-form case. Only for errors that are never caught!
Ub(String),
/// Unreachable code was executed.
Unreachable,
/// A slice/array index projection went out-of-bounds.
BoundsCheckFailed {
len: u64,
index: u64,
},
/// Something was divided by 0 (x / 0).
DivisionByZero,
/// Something was "remainded" by 0 (x % 0).
RemainderByZero,
/// Overflowing inbounds pointer arithmetic.
PointerArithOverflow,
/// Invalid metadata in a wide pointer (using `str` to avoid allocations).
InvalidMeta(&'static str),
/// Invalid drop function in vtable.
InvalidVtableDropFn(FnSig<'tcx>),
/// Invalid size in a vtable: too large.
InvalidVtableSize,
/// Invalid alignment in a vtable: too large, or not a power of 2.
InvalidVtableAlignment(String),
/// Reading a C string that does not end within its allocation.
UnterminatedCString(Pointer),
/// Dereferencing a dangling pointer after it got freed.
PointerUseAfterFree(AllocId),
/// Used a pointer outside the bounds it is valid for.
PointerOutOfBounds {
ptr: Pointer,
msg: CheckInAllocMsg,
allocation_size: Size,
},
/// Using an integer as a pointer in the wrong way.
DanglingIntPointer(u64, CheckInAllocMsg),
/// Used a pointer with bad alignment.
AlignmentCheckFailed {
required: Align,
has: Align,
},
/// Writing to read-only memory.
WriteToReadOnly(AllocId),
/// Trying to access the data behind a function pointer.
DerefFunctionPointer(AllocId),
/// The value validity check found a problem.
/// Should only be thrown by `validity.rs` and always point out which part of the value
/// is the problem.
ValidationFailure {
/// The "path" to the value in question, e.g. `.0[5].field` for a struct
/// field in the 6th element of an array that is the first element of a tuple.
path: Option<String>,
msg: String,
},
/// Using a non-boolean `u8` as bool.
InvalidBool(u8),
/// Using a non-character `u32` as character.
InvalidChar(u32),
/// The tag of an enum does not encode an actual discriminant.
InvalidTag(Scalar),
/// Using a pointer-not-to-a-function as function pointer.
InvalidFunctionPointer(Pointer),
/// Using a string that is not valid UTF-8.
InvalidStr(std::str::Utf8Error),
/// Using uninitialized data where it is not allowed.
InvalidUninitBytes(Option<(AllocId, UninitBytesAccess)>),
/// Working with a local that is not currently live.
DeadLocal,
/// Data size is not equal to target size.
ScalarSizeMismatch {
target_size: u64,
data_size: u64,
},
}
impl fmt::Display for UndefinedBehaviorInfo<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use UndefinedBehaviorInfo::*;
match self {
Ub(msg) => write!(f, "{}", msg),
Unreachable => write!(f, "entering unreachable code"),
BoundsCheckFailed { ref len, ref index } => {
write!(f, "indexing out of bounds: the len is {} but the index is {}", len, index)
}
DivisionByZero => write!(f, "dividing by zero"),
RemainderByZero => write!(f, "calculating the remainder with a divisor of zero"),
PointerArithOverflow => write!(f, "overflowing in-bounds pointer arithmetic"),
InvalidMeta(msg) => write!(f, "invalid metadata in wide pointer: {}", msg),
InvalidVtableDropFn(sig) => write!(
f,
"invalid drop function signature: got {}, expected exactly one argument which must be a pointer type",
sig
),
InvalidVtableSize => {
write!(f, "invalid vtable: size is bigger than largest supported object")
}
InvalidVtableAlignment(msg) => write!(f, "invalid vtable: alignment {}", msg),
UnterminatedCString(p) => write!(
f,
"reading a null-terminated string starting at {} with no null found before end of allocation",
p,
),
PointerUseAfterFree(a) => {
write!(f, "pointer to {} was dereferenced after this allocation got freed", a)
}
PointerOutOfBounds { ptr, msg, allocation_size } => write!(
f,
"{}pointer must be in-bounds at offset {}, \
but is outside bounds of {} which has size {}",
msg,
ptr.offset.bytes(),
ptr.alloc_id,
allocation_size.bytes()
),
DanglingIntPointer(0, CheckInAllocMsg::InboundsTest) => {
write!(f, "null pointer is not a valid pointer for this operation")
}
DanglingIntPointer(i, msg) => {
write!(f, "{}0x{:x} is not a valid pointer", msg, i)
}
AlignmentCheckFailed { required, has } => write!(
f,
"accessing memory with alignment {}, but alignment {} is required",
has.bytes(),
required.bytes()
),
WriteToReadOnly(a) => write!(f, "writing to {} which is read-only", a),
DerefFunctionPointer(a) => write!(f, "accessing {} which contains a function", a),
ValidationFailure { path: None, msg } => write!(f, "type validation failed: {}", msg),
ValidationFailure { path: Some(path), msg } => {
write!(f, "type validation failed at {}: {}", path, msg)
}
InvalidBool(b) => {
write!(f, "interpreting an invalid 8-bit value as a bool: 0x{:02x}", b)
}
InvalidChar(c) => {
write!(f, "interpreting an invalid 32-bit value as a char: 0x{:08x}", c)
}
InvalidTag(val) => write!(f, "enum value has invalid tag: {}", val),
InvalidFunctionPointer(p) => {
write!(f, "using {} as function pointer but it does not point to a function", p)
}
InvalidStr(err) => write!(f, "this string is not valid UTF-8: {}", err),
InvalidUninitBytes(Some((alloc, access))) => write!(
f,
"reading {} byte{} of memory starting at {}, \
but {} byte{} {} uninitialized starting at {}, \
and this operation requires initialized memory",
access.access_size.bytes(),
pluralize!(access.access_size.bytes()),
Pointer::new(*alloc, access.access_offset),
access.uninit_size.bytes(),
pluralize!(access.uninit_size.bytes()),
if access.uninit_size.bytes() != 1 { "are" } else { "is" },
Pointer::new(*alloc, access.uninit_offset),
),
InvalidUninitBytes(None) => write!(
f,
"using uninitialized data, but this operation requires initialized memory"
),
DeadLocal => write!(f, "accessing a dead local variable"),
ScalarSizeMismatch { target_size, data_size } => write!(
f,
"scalar size mismatch: expected {} bytes but got {} bytes instead",
target_size, data_size
),
}
}
}
/// Error information for when the program did something that might (or might not) be correct
/// to do according to the Rust spec, but due to limitations in the interpreter, the
/// operation could not be carried out. These limitations can differ between CTFE and the
/// Miri engine, e.g., CTFE does not support dereferencing pointers at integral addresses.
pub enum UnsupportedOpInfo {
/// Free-form case. Only for errors that are never caught!
Unsupported(String),
/// Could not find MIR for a function.
NoMirFor(DefId),
/// Encountered a pointer where we needed raw bytes.
ReadPointerAsBytes,
//
// The variants below are only reachable from CTFE/const prop, miri will never emit them.
//
/// Encountered raw bytes where we needed a pointer.
ReadBytesAsPointer,
/// Accessing thread local statics
ThreadLocalStatic(DefId),
/// Accessing an unsupported extern static.
ReadExternStatic(DefId),
}
impl fmt::Display for UnsupportedOpInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use UnsupportedOpInfo::*;
match self {
Unsupported(ref msg) => write!(f, "{}", msg),
ReadExternStatic(did) => write!(f, "cannot read from extern static ({:?})", did),
NoMirFor(did) => write!(f, "no MIR body is available for {:?}", did),
ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes",),
ReadBytesAsPointer => write!(f, "unable to turn bytes into a pointer"),
ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({:?})", did),
}
}
}
/// Error information for when the program exhausted the resources granted to it
/// by the interpreter.
pub enum ResourceExhaustionInfo {
/// The stack grew too big.
StackFrameLimitReached,
/// The program ran for too long.
///
/// The exact limit is set by the `const_eval_limit` attribute.
StepLimitReached,
}
impl fmt::Display for ResourceExhaustionInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ResourceExhaustionInfo::*;
match self {
StackFrameLimitReached => {
write!(f, "reached the configured maximum number of stack frames")
}
StepLimitReached => {
write!(f, "exceeded interpreter step limit (see `#[const_eval_limit]`)")
}
}
}
}
/// A trait to work around not having trait object upcasting.
pub trait AsAny: Any {
fn as_any(&self) -> &dyn Any;
}
impl<T: Any> AsAny for T {
#[inline(always)]
fn as_any(&self) -> &dyn Any {
self
}
}
/// A trait for machine-specific errors (or other "machine stop" conditions).
pub trait MachineStopType: AsAny + fmt::Display + Send {
/// If `true`, emit a hard error instead of going through the `CONST_ERR` lint
fn is_hard_err(&self) -> bool {
false
}
}
impl dyn MachineStopType {
#[inline(always)]
pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
self.as_any().downcast_ref()
}
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(InterpError<'_>, 64);
pub enum InterpError<'tcx> {
/// The program caused undefined behavior.
UndefinedBehavior(UndefinedBehaviorInfo<'tcx>),
/// The program did something the interpreter does not support (some of these *might* be UB
/// but the interpreter is not sure).
Unsupported(UnsupportedOpInfo),
/// The program was invalid (ill-typed, bad MIR, not sufficiently monomorphized, ...).
InvalidProgram(InvalidProgramInfo<'tcx>),
/// The program exhausted the interpreter's resources (stack/heap too big,
/// execution takes too long, ...).
ResourceExhaustion(ResourceExhaustionInfo),
/// Stop execution for a machine-controlled reason. This is never raised by
/// the core engine itself.
MachineStop(Box<dyn MachineStopType>),
}
pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
impl fmt::Display for InterpError<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use InterpError::*;
match *self {
Unsupported(ref msg) => write!(f, "{}", msg),
InvalidProgram(ref msg) => write!(f, "{}", msg),
UndefinedBehavior(ref msg) => write!(f, "{}", msg),
ResourceExhaustion(ref msg) => write!(f, "{}", msg),
MachineStop(ref msg) => write!(f, "{}", msg),
}
}
}
// Forward `Debug` to `Display`, so it does not look awful.
impl fmt::Debug for InterpError<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(self, f)
}
}
impl InterpError<'_> {
/// Some errors do string formatting even if the error is never printed.
/// To avoid performance issues, there are places where we want to be sure to never raise these formatting errors,
/// so this method lets us detect them and `bug!` on unexpected errors.
pub fn formatted_string(&self) -> bool {
match self {
InterpError::Unsupported(UnsupportedOpInfo::Unsupported(_))
| InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ValidationFailure { .. })
| InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_)) => true,
_ => false,
}
}
} | use rustc_macros::HashStable;
use rustc_session::CtfeBacktrace;
use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size}; |
app.py | """
login app
"""
from zoom.apps import App
class MyApp(App): | app = MyApp() | pass
|
test_bec.py | import ast
from bluesky.plans import scan, grid_scan
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from bluesky.preprocessors import SupplementalData
from bluesky.callbacks.best_effort import BestEffortCallback
def test_hints(RE, hw):
motor = hw.motor
expected_hint = {'fields': [motor.name]}
assert motor.hints == expected_hint
collector = []
def collect(*args):
collector.append(args)
RE(scan([], motor, 1, 2, 2), {'descriptor': collect})
name, doc = collector.pop()
assert doc['hints'][motor.name] == expected_hint
def test_simple(RE, hw):
|
def test_disable(RE, hw):
det, motor = hw.ab_det, hw.motor
bec = BestEffortCallback()
RE.subscribe(bec)
bec.disable_table()
RE(scan([det], motor, 1, 5, 5))
assert bec._table is None
bec.enable_table()
RE(scan([det], motor, 1, 5, 5))
assert bec._table is not None
bec.peaks.com
bec.peaks['com']
assert ast.literal_eval(repr(bec.peaks)) == vars(bec.peaks)
bec.clear()
assert bec._table is None
# smoke test
bec.disable_plots()
bec.enable_plots()
bec.disable_baseline()
bec.enable_baseline()
bec.disable_heading()
bec.enable_heading()
def test_blank_hints(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5, md={'hints': {}}))
def test_with_baseline(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
sd = SupplementalData(baseline=[hw.det])
RE.preprocessors.append(sd)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5))
def test_underhinted_plan(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
@bpp.run_decorator()
def broken_plan(dets):
yield from bps.trigger_and_read(dets)
RE(broken_plan([hw.det]))
def test_live_grid(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2, True))
| bec = BestEffortCallback()
RE.subscribe(bec)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5)) |
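# These tests are meant for pytest with bluesky's RE and hw fixtures (assumed
# to come from the project's conftest), e.g.: pytest test_bec.py -k test_simple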
ExpireNodeV1.js | // @link http://schemas.gdbots.io/json-schema/gdbots/ncr/command/expire-node/1-0-0.json#
import Fb from '@gdbots/pbj/FieldBuilder.js';
import Format from '@gdbots/pbj/enums/Format.js';
import GdbotsPbjxCommandV1Mixin from '@gdbots/schemas/gdbots/pbjx/mixin/command/CommandV1Mixin.js';
import Message from '@gdbots/pbj/Message.js';
import Schema from '@gdbots/pbj/Schema.js';
import T from '@gdbots/pbj/types/index.js';
export default class | extends Message {
/**
* @private
*
* @returns {Schema}
*/
static defineSchema() {
return new Schema(this.SCHEMA_ID, this,
[
Fb.create('command_id', T.TimeUuidType.create())
.required()
.build(),
Fb.create('occurred_at', T.MicrotimeType.create())
.build(),
/*
* Used to perform optimistic concurrency control.
* @link https://en.wikipedia.org/wiki/HTTP_ETag
*/
Fb.create('expected_etag', T.StringType.create())
.maxLength(100)
.pattern('^[\\w\\.:-]+$')
.build(),
/*
* Multi-tenant apps can use this field to track the tenant id.
*/
Fb.create('ctx_tenant_id', T.StringType.create())
.pattern('^[\\w\\/\\.:-]+$')
.build(),
/*
* The "ctx_retries" field is used to keep track of how many attempts were
* made to process this command. In some cases, the service or transport
* that handles the command may be down or an optimistic check has failed
* and is being retried.
*/
Fb.create('ctx_retries', T.TinyIntType.create())
.build(),
/*
* The "ctx_causator" is the actual causator object that "ctx_causator_ref"
* refers to. In some cases it's useful for command handlers to copy the
* causator into the command. For example, when a node is being updated we
* may want to know what the node will be after the update. We can derive
* this via the causator instead of requesting the node and engaging a race
* condition.
*/
Fb.create('ctx_causator', T.MessageType.create())
.build(),
Fb.create('ctx_causator_ref', T.MessageRefType.create())
.build(),
Fb.create('ctx_correlator_ref', T.MessageRefType.create())
.build(),
Fb.create('ctx_user_ref', T.MessageRefType.create())
.build(),
/*
* The "ctx_app" refers to the application used to send the command. This is
* different from ctx_ua (user_agent) because the agent used (Safari, Firefox)
* is not necessarily the app used (cms, iOS app, website)
*/
Fb.create('ctx_app', T.MessageType.create())
.anyOfCuries([
'gdbots:contexts::app',
])
.build(),
/*
* The "ctx_cloud" is set by the server receiving the command and is generally
* only used internally for tracking and performance monitoring.
*/
Fb.create('ctx_cloud', T.MessageType.create())
.anyOfCuries([
'gdbots:contexts::cloud',
])
.build(),
Fb.create('ctx_ip', T.StringType.create())
.format(Format.IPV4)
.overridable(true)
.build(),
Fb.create('ctx_ipv6', T.StringType.create())
.format(Format.IPV6)
.overridable(true)
.build(),
Fb.create('ctx_ua', T.TextType.create())
.overridable(true)
.build(),
/*
* An optional message/reason for the command being sent.
* Consider this like a git commit message.
*/
Fb.create('ctx_msg', T.TextType.create())
.build(),
/*
* Tags is a map that categorizes data or tracks references in
* external systems. The tag names should be consistent and descriptive,
* e.g. fb_user_id:123, salesforce_customer_id:456.
*/
Fb.create('tags', T.StringType.create())
.asAMap()
.pattern('^[\\w\\/\\.:-]+$')
.build(),
Fb.create('node_ref', T.NodeRefType.create())
.required()
.build(),
],
this.MIXINS,
);
}
}
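// Illustrative usage sketch (comment added; not part of the generated file).
// The NodeRef import path and the node-ref string are assumptions:
//
//   import NodeRef from '@gdbots/pbj/well-known/NodeRef.js';
//   const command = ExpireNodeV1.create()
//     .set('node_ref', NodeRef.fromString('acme:article:123'));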
const M = ExpireNodeV1;
M.prototype.SCHEMA_ID = M.SCHEMA_ID = 'pbj:gdbots:ncr:command:expire-node:1-0-0';
M.prototype.SCHEMA_CURIE = M.SCHEMA_CURIE = 'gdbots:ncr:command:expire-node';
M.prototype.SCHEMA_CURIE_MAJOR = M.SCHEMA_CURIE_MAJOR = 'gdbots:ncr:command:expire-node:v1';
M.prototype.MIXINS = M.MIXINS = [
'gdbots:pbjx:mixin:command:v1',
'gdbots:pbjx:mixin:command',
'gdbots:common:mixin:taggable:v1',
'gdbots:common:mixin:taggable',
];
GdbotsPbjxCommandV1Mixin(M);
Object.freeze(M);
Object.freeze(M.prototype);
| ExpireNodeV1 |
dbus.go | package dbus
import (
"errors"
"reflect"
"strings"
)
var (
byteType = reflect.TypeOf(byte(0))
boolType = reflect.TypeOf(false)
uint8Type = reflect.TypeOf(uint8(0))
int16Type = reflect.TypeOf(int16(0))
uint16Type = reflect.TypeOf(uint16(0))
int32Type = reflect.TypeOf(int32(0))
uint32Type = reflect.TypeOf(uint32(0))
int64Type = reflect.TypeOf(int64(0))
uint64Type = reflect.TypeOf(uint64(0))
float64Type = reflect.TypeOf(float64(0))
stringType = reflect.TypeOf("")
signatureType = reflect.TypeOf(Signature{""})
objectPathType = reflect.TypeOf(ObjectPath(""))
variantType = reflect.TypeOf(Variant{Signature{""}, nil})
interfacesType = reflect.TypeOf([]interface{}{})
unixFDType = reflect.TypeOf(UnixFD(0))
unixFDIndexType = reflect.TypeOf(UnixFDIndex(0))
)
// An InvalidTypeError signals that a value which cannot be represented in the
// D-Bus wire format was passed to a function.
type InvalidTypeError struct {
Type reflect.Type
}
func (e InvalidTypeError) Error() string {
return "dbus: invalid type " + e.Type.String()
}
// Store copies the values contained in src to dest, which must be a slice of
// pointers. It converts slices of interfaces from src to corresponding structs
// in dest. An error is returned if the lengths of src and dest or the types of
// their elements don't match.
func Store(src []interface{}, dest ...interface{}) error {
if len(src) != len(dest) {
return errors.New("dbus.Store: length mismatch")
}
for i := range src {
if err := store(src[i], dest[i]); err != nil {
return err
}
}
return nil
}
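// Illustrative sketch (comment added; not part of the original file):
// unpacking a D-Bus reply body into typed destinations with Store.
//
//	var name string
//	var age int32
//	body := []interface{}{"alice", int32(30)}
//	err := Store(body, &name, &age) // name == "alice", age == 30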
func store(src, dest interface{}) error {
if reflect.TypeOf(dest).Elem() == reflect.TypeOf(src) {
reflect.ValueOf(dest).Elem().Set(reflect.ValueOf(src))
return nil
} else if hasStruct(dest) {
rv := reflect.ValueOf(dest)
if rv.Kind() == reflect.Interface || rv.Kind() == reflect.Ptr {
rv = rv.Elem()
}
switch rv.Kind() {
case reflect.Struct:
vs, ok := src.([]interface{})
if !ok {
return errors.New("dbus.Store: type mismatch")
}
t := rv.Type()
ndest := make([]interface{}, 0, rv.NumField())
for i := 0; i < rv.NumField(); i++ {
field := t.Field(i)
if field.PkgPath == "" && field.Tag.Get("dbus") != "-" {
ndest = append(ndest, rv.Field(i).Addr().Interface())
}
}
if len(vs) != len(ndest) {
return errors.New("dbus.Store: type mismatch")
}
err := Store(vs, ndest...)
if err != nil {
return errors.New("dbus.Store: type mismatch")
}
case reflect.Slice:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Slice {
return errors.New("dbus.Store: type mismatch")
}
rv.Set(reflect.MakeSlice(rv.Type(), sv.Len(), sv.Len()))
for i := 0; i < sv.Len(); i++ {
if err := store(sv.Index(i).Interface(), rv.Index(i).Addr().Interface()); err != nil {
return err
}
}
case reflect.Map:
sv := reflect.ValueOf(src)
if sv.Kind() != reflect.Map {
return errors.New("dbus.Store: type mismatch")
}
keys := sv.MapKeys()
rv.Set(reflect.MakeMap(sv.Type()))
for _, key := range keys {
v := reflect.New(sv.Type().Elem())
// store expects (src, dest): copy the map value into the new element.
if err := store(sv.MapIndex(key).Interface(), v.Interface()); err != nil {
return err
}
rv.SetMapIndex(key, v.Elem())
}
default:
return errors.New("dbus.Store: type mismatch")
}
return nil
} else {
return errors.New("dbus.Store: type mismatch")
}
}
func hasStruct(v interface{}) bool {
t := reflect.TypeOf(v)
for {
switch t.Kind() {
case reflect.Struct:
return true
case reflect.Slice, reflect.Ptr, reflect.Map:
t = t.Elem()
default:
return false
}
}
}
// An ObjectPath is an object path as defined by the D-Bus spec.
type ObjectPath string
// IsValid returns whether the object path is valid.
func (o ObjectPath) IsValid() bool {
s := string(o)
if len(s) == 0 {
return false
}
if s[0] != '/' {
return false
}
if s[len(s)-1] == '/' && len(s) != 1 {
return false
}
// probably not used, but technically possible
if s == "/" {
return true
}
split := strings.Split(s[1:], "/")
for _, v := range split {
if len(v) == 0 {
return false
}
for _, c := range v {
if !isMemberChar(c) {
return false
}
}
}
return true
}
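// Examples of the rules above (comment added for clarity):
//
//	ObjectPath("/org/freedesktop/DBus").IsValid() // true
//	ObjectPath("/").IsValid()                     // true
//	ObjectPath("org/freedesktop").IsValid()       // false: no leading '/'
//	ObjectPath("/org//DBus").IsValid()            // false: empty element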
// A UnixFD is a Unix file descriptor sent over the wire. See the package-level
// documentation for more information about Unix file descriptor passing.
type UnixFD int32
// A UnixFDIndex is the representation of a Unix file descriptor in a message.
type UnixFDIndex uint32
// alignment returns the alignment of values of type t.
func alignment(t reflect.Type) int {
switch t {
case variantType:
return 1
case objectPathType:
return 4
case signatureType:
return 1
case interfacesType: // sometimes used for structs
return 8
}
switch t.Kind() {
case reflect.Uint8:
return 1
case reflect.Uint16, reflect.Int16:
return 2
case reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map:
return 4
case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct:
return 8
case reflect.Ptr:
return alignment(t.Elem())
}
return 1
}
// isKeyType returns whether t is a valid type for a D-Bus dict.
func isKeyType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64,
reflect.String:
return true
}
return false
}
// isValidInterface returns whether s is a valid name for an interface.
func isValidInterface(s string) bool {
if len(s) == 0 || len(s) > 255 || s[0] == '.' {
return false
}
elem := strings.Split(s, ".")
if len(elem) < 2 {
return false
}
for _, v := range elem {
if len(v) == 0 {
return false
}
if v[0] >= '0' && v[0] <= '9' {
return false
}
for _, c := range v {
if !isMemberChar(c) {
return false
}
}
}
return true
}
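// For example (comment added for clarity), "org.freedesktop.DBus" is valid,
// while "org" (fewer than two elements) and "org.2foo" (an element starting
// with a digit) are not.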
// isValidMember returns whether s is a valid name for a member.
func isValidMember(s string) bool {
if len(s) == 0 || len(s) > 255 {
return false
}
i := strings.Index(s, ".")
if i != -1 {
return false
}
if s[0] >= '0' && s[0] <= '9' |
for _, c := range s {
if !isMemberChar(c) {
return false
}
}
return true
}
func isMemberChar(c rune) bool {
return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') ||
(c >= 'a' && c <= 'z') || c == '_'
}
| {
return false
} |
timestring.go | package timestring | } |
func SumOfTimes(times string) string {
return "0:0:0" |
canvas2svg.js | /*!!
* Canvas 2 Svg v1.0.19
* A low level canvas to SVG converter. Uses a mock canvas context to build an SVG document.
*
* Licensed under the MIT license:
* http://www.opensource.org/licenses/mit-license.php
*
* Author:
* Kerry Liu
*
* Copyright (c) 2014 Gliffy Inc.
*/
;(function () {
"use strict";
var STYLES, ctx, CanvasGradient, CanvasPattern, namedEntities;
//helper function to format a string
function format(str, args) {
var keys = Object.keys(args), i;
for (i=0; i<keys.length; i++) {
str = str.replace(new RegExp("\\{" + keys[i] + "\\}", "gi"), args[keys[i]]);
}
return str;
}
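    // e.g. format("rgb({r},{g},{b})", {r: 255, g: 0, b: 0}) yields "rgb(255,0,0)"
    // (example comment added for clarity)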
//helper function that generates a random string
function randomString(holder) {
var chars, randomstring, i;
if (!holder) {
throw new Error("cannot create a random attribute name for an undefined object");
}
chars = "ABCDEFGHIJKLMNOPQRSTUVWXTZabcdefghiklmnopqrstuvwxyz";
randomstring = "";
do {
randomstring = "";
for (i = 0; i < 12; i++) {
randomstring += chars[Math.floor(Math.random() * chars.length)];
}
} while (holder[randomstring]);
return randomstring;
}
//helper function to map named to numbered entities
function createNamedToNumberedLookup(items, radix) {
var i, entity, lookup = {}, base10, base16;
items = items.split(',');
radix = radix || 10;
// Map from named to numbered entities.
for (i = 0; i < items.length; i += 2) {
entity = '&' + items[i + 1] + ';';
base10 = parseInt(items[i], radix);
lookup[entity] = '&#'+base10+';';
}
//FF and IE need to create a regex from hex values ie == \xa0
lookup["\\xa0"] = ' ';
return lookup;
}
//helper function to map canvas-textAlign to svg-textAnchor
function getTextAnchor(textAlign) {
//TODO: support rtl languages
var mapping = {"left":"start", "right":"end", "center":"middle", "start":"start", "end":"end"};
return mapping[textAlign] || mapping.start;
}
//helper function to map canvas-textBaseline to svg-dominantBaseline
function | (textBaseline) {
//INFO: not supported in all browsers
var mapping = {"alphabetic": "alphabetic", "hanging": "hanging", "top":"text-before-edge", "bottom":"text-after-edge", "middle":"central"};
return mapping[textBaseline] || mapping.alphabetic;
}
// Unpack entities lookup where the numbers are in radix 32 to reduce the size
// entity mapping courtesy of tinymce
namedEntities = createNamedToNumberedLookup(
'50,nbsp,51,iexcl,52,cent,53,pound,54,curren,55,yen,56,brvbar,57,sect,58,uml,59,copy,' +
'5a,ordf,5b,laquo,5c,not,5d,shy,5e,reg,5f,macr,5g,deg,5h,plusmn,5i,sup2,5j,sup3,5k,acute,' +
'5l,micro,5m,para,5n,middot,5o,cedil,5p,sup1,5q,ordm,5r,raquo,5s,frac14,5t,frac12,5u,frac34,' +
'5v,iquest,60,Agrave,61,Aacute,62,Acirc,63,Atilde,64,Auml,65,Aring,66,AElig,67,Ccedil,' +
'68,Egrave,69,Eacute,6a,Ecirc,6b,Euml,6c,Igrave,6d,Iacute,6e,Icirc,6f,Iuml,6g,ETH,6h,Ntilde,' +
'6i,Ograve,6j,Oacute,6k,Ocirc,6l,Otilde,6m,Ouml,6n,times,6o,Oslash,6p,Ugrave,6q,Uacute,' +
'6r,Ucirc,6s,Uuml,6t,Yacute,6u,THORN,6v,szlig,70,agrave,71,aacute,72,acirc,73,atilde,74,auml,' +
'75,aring,76,aelig,77,ccedil,78,egrave,79,eacute,7a,ecirc,7b,euml,7c,igrave,7d,iacute,7e,icirc,' +
'7f,iuml,7g,eth,7h,ntilde,7i,ograve,7j,oacute,7k,ocirc,7l,otilde,7m,ouml,7n,divide,7o,oslash,' +
'7p,ugrave,7q,uacute,7r,ucirc,7s,uuml,7t,yacute,7u,thorn,7v,yuml,ci,fnof,sh,Alpha,si,Beta,' +
'sj,Gamma,sk,Delta,sl,Epsilon,sm,Zeta,sn,Eta,so,Theta,sp,Iota,sq,Kappa,sr,Lambda,ss,Mu,' +
'st,Nu,su,Xi,sv,Omicron,t0,Pi,t1,Rho,t3,Sigma,t4,Tau,t5,Upsilon,t6,Phi,t7,Chi,t8,Psi,' +
't9,Omega,th,alpha,ti,beta,tj,gamma,tk,delta,tl,epsilon,tm,zeta,tn,eta,to,theta,tp,iota,' +
'tq,kappa,tr,lambda,ts,mu,tt,nu,tu,xi,tv,omicron,u0,pi,u1,rho,u2,sigmaf,u3,sigma,u4,tau,' +
'u5,upsilon,u6,phi,u7,chi,u8,psi,u9,omega,uh,thetasym,ui,upsih,um,piv,812,bull,816,hellip,' +
'81i,prime,81j,Prime,81u,oline,824,frasl,88o,weierp,88h,image,88s,real,892,trade,89l,alefsym,' +
'8cg,larr,8ch,uarr,8ci,rarr,8cj,darr,8ck,harr,8dl,crarr,8eg,lArr,8eh,uArr,8ei,rArr,8ej,dArr,' +
'8ek,hArr,8g0,forall,8g2,part,8g3,exist,8g5,empty,8g7,nabla,8g8,isin,8g9,notin,8gb,ni,8gf,prod,' +
'8gh,sum,8gi,minus,8gn,lowast,8gq,radic,8gt,prop,8gu,infin,8h0,ang,8h7,and,8h8,or,8h9,cap,8ha,cup,' +
'8hb,int,8hk,there4,8hs,sim,8i5,cong,8i8,asymp,8j0,ne,8j1,equiv,8j4,le,8j5,ge,8k2,sub,8k3,sup,8k4,' +
'nsub,8k6,sube,8k7,supe,8kl,oplus,8kn,otimes,8l5,perp,8m5,sdot,8o8,lceil,8o9,rceil,8oa,lfloor,8ob,' +
'rfloor,8p9,lang,8pa,rang,9ea,loz,9j0,spades,9j3,clubs,9j5,hearts,9j6,diams,ai,OElig,aj,oelig,b0,' +
'Scaron,b1,scaron,bo,Yuml,m6,circ,ms,tilde,802,ensp,803,emsp,809,thinsp,80c,zwnj,80d,zwj,80e,lrm,' +
'80f,rlm,80j,ndash,80k,mdash,80o,lsquo,80p,rsquo,80q,sbquo,80s,ldquo,80t,rdquo,80u,bdquo,810,dagger,' +
'811,Dagger,81g,permil,81p,lsaquo,81q,rsaquo,85c,euro', 32);
//Some basic mappings for attributes and default values.
STYLES = {
"strokeStyle":{
svgAttr : "stroke", //corresponding svg attribute
canvas : "#000000", //canvas default
svg : "none", //svg default
apply : "stroke" //apply on stroke() or fill()
},
"fillStyle":{
svgAttr : "fill",
canvas : "#000000",
svg : null, //svg default is black, but we need to special case this to handle canvas stroke without fill
apply : "fill"
},
"lineCap":{
svgAttr : "stroke-linecap",
canvas : "butt",
svg : "butt",
apply : "stroke"
},
"lineJoin":{
svgAttr : "stroke-linejoin",
canvas : "miter",
svg : "miter",
apply : "stroke"
},
"miterLimit":{
svgAttr : "stroke-miterlimit",
canvas : 10,
svg : 4,
apply : "stroke"
},
"lineWidth":{
svgAttr : "stroke-width",
canvas : 1,
svg : 1,
apply : "stroke"
},
"globalAlpha": {
svgAttr : "opacity",
canvas : 1,
svg : 1,
apply : "fill stroke"
},
"font":{
//font converts to multiple svg attributes, there is custom logic for this
canvas : "10px sans-serif"
},
"shadowColor":{
canvas : "#000000"
},
"shadowOffsetX":{
canvas : 0
},
"shadowOffsetY":{
canvas : 0
},
"shadowBlur":{
canvas : 0
},
"textAlign":{
canvas : "start"
},
"textBaseline":{
canvas : "alphabetic"
},
"lineDash" : {
svgAttr : "stroke-dasharray",
canvas : [],
svg : null,
apply : "stroke"
}
};
/**
*
* @param gradientNode - reference to the gradient
* @constructor
*/
CanvasGradient = function (gradientNode, ctx) {
this.__root = gradientNode;
this.__ctx = ctx;
};
/**
* Adds a color stop to the gradient root
*/
CanvasGradient.prototype.addColorStop = function (offset, color) {
var stop = this.__ctx.__createElement("stop"), regex, matches;
stop.setAttribute("offset", offset);
if (color.indexOf("rgba") !== -1) {
//separate alpha value, since webkit can't handle it
regex = /rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d?\.?\d*)\s*\)/gi;
matches = regex.exec(color);
stop.setAttribute("stop-color", format("rgb({r},{g},{b})", {r:matches[1], g:matches[2], b:matches[3]}));
stop.setAttribute("stop-opacity", matches[4]);
} else {
stop.setAttribute("stop-color", color);
}
this.__root.appendChild(stop);
};
CanvasPattern = function (pattern, ctx) {
this.__root = pattern;
this.__ctx = ctx;
};
/**
* The mock canvas context
* @param o - options include:
* ctx - existing Context2D to wrap around
* width - width of your canvas (defaults to 500)
* height - height of your canvas (defaults to 500)
* enableMirroring - enables canvas mirroring (get image data) (defaults to false)
* document - the document object (defaults to the current document)
*/
ctx = function (o) {
var defaultOptions = { width:500, height:500, enableMirroring : false}, options;
//keep support for this way of calling C2S: new C2S(width,height)
if (arguments.length > 1) {
options = defaultOptions;
options.width = arguments[0];
options.height = arguments[1];
} else if ( !o ) {
options = defaultOptions;
} else {
options = o;
}
if (!(this instanceof ctx)) {
//did someone call this without new?
return new ctx(options);
}
//setup options
this.width = options.width || defaultOptions.width;
this.height = options.height || defaultOptions.height;
this.enableMirroring = options.enableMirroring !== undefined ? options.enableMirroring : defaultOptions.enableMirroring;
this.canvas = this; ///point back to this instance!
this.__document = options.document || document;
// allow passing in an existing context to wrap around
// if a context is passed in, we know a canvas already exist
if (options.ctx) {
this.__ctx = options.ctx;
} else {
this.__canvas = this.__document.createElement("canvas");
this.__ctx = this.__canvas.getContext("2d");
}
this.__setDefaultStyles();
this.__stack = [this.__getStyleState()];
this.__groupStack = [];
//the root svg element
this.__root = this.__document.createElementNS("http://www.w3.org/2000/svg", "svg");
this.__root.setAttribute("version", 1.1);
this.__root.setAttribute("xmlns", "http://www.w3.org/2000/svg");
this.__root.setAttributeNS("http://www.w3.org/2000/xmlns/", "xmlns:xlink", "http://www.w3.org/1999/xlink");
this.__root.setAttribute("width", this.width);
this.__root.setAttribute("height", this.height);
//make sure we don't generate the same ids in defs
this.__ids = {};
//defs tag
this.__defs = this.__document.createElementNS("http://www.w3.org/2000/svg", "defs");
this.__root.appendChild(this.__defs);
//also add a group child. the svg element can't use the transform attribute
this.__currentElement = this.__document.createElementNS("http://www.w3.org/2000/svg", "g");
this.__root.appendChild(this.__currentElement);
};
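    /**
     * Illustrative usage sketch (added; not part of the original source):
     *
     *     var c2s = new ctx({width: 300, height: 150});
     *     c2s.fillStyle = "#ff0000";
     *     c2s.fillRect(10, 10, 100, 50);
     *     var svgString = c2s.getSerializedSvg(true);
     *
     * Outside this closure the same constructor is exposed as window.C2S
     * (or the CommonJS export), so new C2S(300, 150) behaves identically.
     */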
/**
* Creates the specified svg element
* @private
*/
ctx.prototype.__createElement = function (elementName, properties, resetFill) {
if (typeof properties === "undefined") {
properties = {};
}
var element = this.__document.createElementNS("http://www.w3.org/2000/svg", elementName),
keys = Object.keys(properties), i, key;
if (resetFill) {
//if fill or stroke is not specified, the svg element should not display. By default SVG's fill is black.
element.setAttribute("fill", "none");
element.setAttribute("stroke", "none");
}
for (i=0; i<keys.length; i++) {
key = keys[i];
element.setAttribute(key, properties[key]);
}
return element;
};
/**
* Applies default canvas styles to the context
* @private
*/
ctx.prototype.__setDefaultStyles = function () {
//default 2d canvas context properties see:http://www.w3.org/TR/2dcontext/
var keys = Object.keys(STYLES), i, key;
for (i=0; i<keys.length; i++) {
key = keys[i];
this[key] = STYLES[key].canvas;
}
};
/**
* Applies styles on restore
* @param styleState
* @private
*/
ctx.prototype.__applyStyleState = function (styleState) {
var keys = Object.keys(styleState), i, key;
for (i=0; i<keys.length; i++) {
key = keys[i];
this[key] = styleState[key];
}
};
/**
* Gets the current style state
* @return {Object}
* @private
*/
ctx.prototype.__getStyleState = function () {
var i, styleState = {}, keys = Object.keys(STYLES), key;
for (i=0; i<keys.length; i++) {
key = keys[i];
styleState[key] = this[key];
}
return styleState;
};
/**
* Applies the current styles to the current SVG element. On "ctx.fill" or "ctx.stroke"
* @param type
* @private
*/
ctx.prototype.__applyStyleToCurrentElement = function (type) {
var currentElement = this.__currentElement;
var currentStyleGroup = this.__currentElementsToStyle;
if (currentStyleGroup) {
currentElement.setAttribute(type, "");
currentElement = currentStyleGroup.element;
currentStyleGroup.children.forEach(function (node) {
node.setAttribute(type, "");
})
}
var keys = Object.keys(STYLES), i, style, value, id, regex, matches;
for (i = 0; i < keys.length; i++) {
style = STYLES[keys[i]];
value = this[keys[i]];
if (style.apply) {
//is this a gradient or pattern?
if (value instanceof CanvasPattern) {
//pattern
if (value.__ctx) {
//copy over defs
while(value.__ctx.__defs.childNodes.length) {
id = value.__ctx.__defs.childNodes[0].getAttribute("id");
this.__ids[id] = id;
this.__defs.appendChild(value.__ctx.__defs.childNodes[0]);
}
}
currentElement.setAttribute(style.apply, format("url(#{id})", {id:value.__root.getAttribute("id")}));
}
else if (value instanceof CanvasGradient) {
//gradient
currentElement.setAttribute(style.apply, format("url(#{id})", {id:value.__root.getAttribute("id")}));
} else if (style.apply.indexOf(type)!==-1 && style.svg !== value) {
if ((style.svgAttr === "stroke" || style.svgAttr === "fill") && value.indexOf("rgba") !== -1) {
//separate alpha value, since illustrator can't handle it
regex = /rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d?\.?\d*)\s*\)/gi;
matches = regex.exec(value);
currentElement.setAttribute(style.svgAttr, format("rgb({r},{g},{b})", {r:matches[1], g:matches[2], b:matches[3]}));
//should take globalAlpha here
var opacity = matches[4];
var globalAlpha = this.globalAlpha;
if (globalAlpha != null) {
opacity *= globalAlpha;
}
currentElement.setAttribute(style.svgAttr+"-opacity", opacity);
} else {
var attr = style.svgAttr;
if (keys[i] === 'globalAlpha') {
attr = type+'-'+style.svgAttr;
if (currentElement.getAttribute(attr)) {
//fill-opacity or stroke-opacity has already been set by stroke or fill.
continue;
}
}
//otherwise only update attribute if right type, and not svg default
currentElement.setAttribute(attr, value);
}
}
}
}
};
/**
* Will return the closest group or svg node. May return the current element.
* @private
*/
ctx.prototype.__closestGroupOrSvg = function (node) {
node = node || this.__currentElement;
if (node.nodeName === "g" || node.nodeName === "svg") {
return node;
} else {
return this.__closestGroupOrSvg(node.parentNode);
}
};
/**
* Returns the serialized value of the svg so far
* @param fixNamedEntities - Standalone SVG doesn't support named entities, which document.createTextNode encodes.
* If true, we attempt to find all named entities and encode them as numeric entities.
* @return serialized svg
*/
ctx.prototype.getSerializedSvg = function (fixNamedEntities) {
var serialized = new XMLSerializer().serializeToString(this.__root),
keys, i, key, value, regexp, xmlns;
//IE searches for a duplicate xmlns because it didn't implement setAttributeNS correctly
xmlns = /xmlns="http:\/\/www\.w3\.org\/2000\/svg".+xmlns="http:\/\/www\.w3\.org\/2000\/svg/gi;
if (xmlns.test(serialized)) {
serialized = serialized.replace('xmlns="http://www.w3.org/2000/svg','xmlns:xlink="http://www.w3.org/1999/xlink');
}
if (fixNamedEntities) {
keys = Object.keys(namedEntities);
//loop over each named entity and replace with the proper equivalent.
for (i=0; i<keys.length; i++) {
key = keys[i];
value = namedEntities[key];
regexp = new RegExp(key, "gi");
if (regexp.test(serialized)) {
serialized = serialized.replace(regexp, value);
}
}
}
return serialized;
};
/**
* Returns the root svg
* @return
*/
ctx.prototype.getSvg = function () {
return this.__root;
};
/**
* Will generate a group tag.
*/
ctx.prototype.save = function () {
var group = this.__createElement("g");
var parent = this.__closestGroupOrSvg();
this.__groupStack.push(parent);
parent.appendChild(group);
this.__currentElement = group;
this.__stack.push(this.__getStyleState());
};
/**
* Sets current element to parent, or just root if already root
*/
ctx.prototype.restore = function () {
this.__currentElement = this.__groupStack.pop();
this.__currentElementsToStyle = null;
//Clearing the canvas will make the popped group invalid; currentElement is set to the root group node.
if (!this.__currentElement) {
this.__currentElement = this.__root.childNodes[1];
}
var state = this.__stack.pop();
this.__applyStyleState(state);
};
/**
* Helper method to add transform
* @private
*/
ctx.prototype.__addTransform = function (t) {
//if the current element has siblings, add another group
var parent = this.__closestGroupOrSvg();
if (parent.childNodes.length > 0) {
if (this.__currentElement.nodeName === "path") {
if (!this.__currentElementsToStyle) this.__currentElementsToStyle = {element: parent, children: []};
this.__currentElementsToStyle.children.push(this.__currentElement)
this.__applyCurrentDefaultPath();
}
var group = this.__createElement("g");
parent.appendChild(group);
this.__currentElement = group;
}
var transform = this.__currentElement.getAttribute("transform");
if (transform) {
transform += " ";
} else {
transform = "";
}
transform += t;
this.__currentElement.setAttribute("transform", transform);
};
/**
* scales the current element
*/
ctx.prototype.scale = function (x, y) {
if (y === undefined) {
y = x;
}
this.__addTransform(format("scale({x},{y})", {x:x, y:y}));
};
/**
* rotates the current element
*/
ctx.prototype.rotate = function (angle) {
var degrees = (angle * 180 / Math.PI);
this.__addTransform(format("rotate({angle},{cx},{cy})", {angle:degrees, cx:0, cy:0}));
};
/**
* translates the current element
*/
ctx.prototype.translate = function (x, y) {
this.__addTransform(format("translate({x},{y})", {x:x,y:y}));
};
/**
* applies a transform to the current element
*/
ctx.prototype.transform = function (a, b, c, d, e, f) {
this.__addTransform(format("matrix({a},{b},{c},{d},{e},{f})", {a:a, b:b, c:c, d:d, e:e, f:f}));
};
/**
* Create a new Path Element
*/
ctx.prototype.beginPath = function () {
var path, parent;
// Note that there is only one current default path, it is not part of the drawing state.
// See also: https://html.spec.whatwg.org/multipage/scripting.html#current-default-path
this.__currentDefaultPath = "";
this.__currentPosition = {};
path = this.__createElement("path", {}, true);
parent = this.__closestGroupOrSvg();
parent.appendChild(path);
this.__currentElement = path;
};
/**
* Helper function to apply currentDefaultPath to current path element
* @private
*/
ctx.prototype.__applyCurrentDefaultPath = function () {
var currentElement = this.__currentElement;
if (currentElement.nodeName === "path") {
currentElement.setAttribute("d", this.__currentDefaultPath);
} else {
console.error("Attempted to apply path command to node", currentElement.nodeName);
}
};
/**
* Helper function to add path command
* @private
*/
ctx.prototype.__addPathCommand = function (command) {
this.__currentDefaultPath += " ";
this.__currentDefaultPath += command;
};
/**
* Adds the move command to the current path element;
* if the current element is not a path, a new path element is created
*/
ctx.prototype.moveTo = function (x,y) {
if (this.__currentElement.nodeName !== "path") {
this.beginPath();
}
// creates a new subpath with the given point
this.__currentPosition = {x: x, y: y};
this.__addPathCommand(format("M {x} {y}", {x:x, y:y}));
};
/**
* Closes the current path
*/
ctx.prototype.closePath = function () {
if (this.__currentDefaultPath) {
this.__addPathCommand("Z");
}
};
/**
* Adds a line to command
*/
ctx.prototype.lineTo = function (x, y) {
this.__currentPosition = {x: x, y: y};
if (this.__currentDefaultPath.indexOf('M') > -1) {
this.__addPathCommand(format("L {x} {y}", {x:x, y:y}));
} else {
this.__addPathCommand(format("M {x} {y}", {x:x, y:y}));
}
};
/**
* Add a bezier command
*/
ctx.prototype.bezierCurveTo = function (cp1x, cp1y, cp2x, cp2y, x, y) {
this.__currentPosition = {x: x, y: y};
this.__addPathCommand(format("C {cp1x} {cp1y} {cp2x} {cp2y} {x} {y}",
{cp1x:cp1x, cp1y:cp1y, cp2x:cp2x, cp2y:cp2y, x:x, y:y}));
};
/**
* Adds a quadratic curve to command
*/
ctx.prototype.quadraticCurveTo = function (cpx, cpy, x, y) {
this.__currentPosition = {x: x, y: y};
this.__addPathCommand(format("Q {cpx} {cpy} {x} {y}", {cpx:cpx, cpy:cpy, x:x, y:y}));
};
/**
* Return a new normalized vector of given vector
*/
var normalize = function (vector) {
var len = Math.sqrt(vector[0] * vector[0] + vector[1] * vector[1]);
return [vector[0] / len, vector[1] / len];
};
/**
* Adds the arcTo to the current path
*
* @see http://www.w3.org/TR/2015/WD-2dcontext-20150514/#dom-context-2d-arcto
*/
ctx.prototype.arcTo = function (x1, y1, x2, y2, radius) {
// Let the point (x0, y0) be the last point in the subpath.
var x0 = this.__currentPosition && this.__currentPosition.x;
var y0 = this.__currentPosition && this.__currentPosition.y;
// First ensure there is a subpath for (x1, y1).
if (typeof x0 == "undefined" || typeof y0 == "undefined") {
return;
}
// Negative values for radius must cause the implementation to throw an IndexSizeError exception.
if (radius < 0) {
throw new Error("IndexSizeError: The radius provided (" + radius + ") is negative.");
}
// If the point (x0, y0) is equal to the point (x1, y1),
// or if the point (x1, y1) is equal to the point (x2, y2),
// or if the radius radius is zero,
// then the method must add the point (x1, y1) to the subpath,
// and connect that point to the previous point (x0, y0) by a straight line.
if (((x0 === x1) && (y0 === y1))
|| ((x1 === x2) && (y1 === y2))
|| (radius === 0)) {
this.lineTo(x1, y1);
return;
}
// Otherwise, if the points (x0, y0), (x1, y1), and (x2, y2) all lie on a single straight line,
// then the method must add the point (x1, y1) to the subpath,
// and connect that point to the previous point (x0, y0) by a straight line.
var unit_vec_p1_p0 = normalize([x0 - x1, y0 - y1]);
var unit_vec_p1_p2 = normalize([x2 - x1, y2 - y1]);
if (unit_vec_p1_p0[0] * unit_vec_p1_p2[1] === unit_vec_p1_p0[1] * unit_vec_p1_p2[0]) {
this.lineTo(x1, y1);
return;
}
// Otherwise, let The Arc be the shortest arc given by circumference of the circle that has radius radius,
// and that has one point tangent to the half-infinite line that crosses the point (x0, y0) and ends at the point (x1, y1),
// and that has a different point tangent to the half-infinite line that ends at the point (x1, y1), and crosses the point (x2, y2).
// The points at which this circle touches these two lines are called the start and end tangent points respectively.
// note that both vectors are unit vectors, so the length is 1
var cos = (unit_vec_p1_p0[0] * unit_vec_p1_p2[0] + unit_vec_p1_p0[1] * unit_vec_p1_p2[1]);
var theta = Math.acos(Math.abs(cos));
// Calculate origin
var unit_vec_p1_origin = normalize([
unit_vec_p1_p0[0] + unit_vec_p1_p2[0],
unit_vec_p1_p0[1] + unit_vec_p1_p2[1]
]);
var len_p1_origin = radius / Math.sin(theta / 2);
var x = x1 + len_p1_origin * unit_vec_p1_origin[0];
var y = y1 + len_p1_origin * unit_vec_p1_origin[1];
// Calculate start angle and end angle
// rotate 90deg clockwise (note that y axis points to its down)
var unit_vec_origin_start_tangent = [
-unit_vec_p1_p0[1],
unit_vec_p1_p0[0]
];
// rotate 90deg counter clockwise (note that y axis points to its down)
var unit_vec_origin_end_tangent = [
unit_vec_p1_p2[1],
-unit_vec_p1_p2[0]
];
var getAngle = function (vector) {
// get angle (clockwise) between vector and (1, 0)
var x = vector[0];
var y = vector[1];
if (y >= 0) { // note that y axis points to its down
return Math.acos(x);
} else {
return -Math.acos(x);
}
};
var startAngle = getAngle(unit_vec_origin_start_tangent);
var endAngle = getAngle(unit_vec_origin_end_tangent);
// Connect the point (x0, y0) to the start tangent point by a straight line
this.lineTo(x + unit_vec_origin_start_tangent[0] * radius,
y + unit_vec_origin_start_tangent[1] * radius);
// Connect the start tangent point to the end tangent point by arc
// and adding the end tangent point to the subpath.
this.arc(x, y, radius, startAngle, endAngle);
};
/**
* Sets the stroke property on the current element
*/
ctx.prototype.stroke = function () {
if (this.__currentElement.nodeName === "path") {
this.__currentElement.setAttribute("paint-order", "fill stroke markers");
}
this.__applyCurrentDefaultPath();
this.__applyStyleToCurrentElement("stroke");
};
/**
* Sets fill properties on the current element
*/
ctx.prototype.fill = function () {
if (this.__currentElement.nodeName === "path") {
this.__currentElement.setAttribute("paint-order", "stroke fill markers");
}
this.__applyCurrentDefaultPath();
this.__applyStyleToCurrentElement("fill");
};
/**
* Adds a rectangle to the path.
*/
ctx.prototype.rect = function (x, y, width, height) {
if (this.__currentElement.nodeName !== "path") {
this.beginPath();
}
this.moveTo(x, y);
this.lineTo(x+width, y);
this.lineTo(x+width, y+height);
this.lineTo(x, y+height);
this.lineTo(x, y);
this.closePath();
};
/**
* adds a rectangle element
*/
ctx.prototype.fillRect = function (x, y, width, height) {
var rect, parent;
rect = this.__createElement("rect", {
x : x,
y : y,
width : width,
height : height
}, true);
parent = this.__closestGroupOrSvg();
parent.appendChild(rect);
this.__currentElement = rect;
this.__applyStyleToCurrentElement("fill");
};
/**
* Draws a rectangle with no fill
* @param x
* @param y
* @param width
* @param height
*/
ctx.prototype.strokeRect = function (x, y, width, height) {
var rect, parent;
rect = this.__createElement("rect", {
x : x,
y : y,
width : width,
height : height
}, true);
parent = this.__closestGroupOrSvg();
parent.appendChild(rect);
this.__currentElement = rect;
this.__applyStyleToCurrentElement("stroke");
};
/**
* Clear entire canvas:
* 1. save current transforms
* 2. remove all the childNodes of the root g element
*/
ctx.prototype.__clearCanvas = function () {
var current = this.__closestGroupOrSvg(),
transform = current.getAttribute("transform");
var rootGroup = this.__root.childNodes[1];
var childNodes = rootGroup.childNodes;
for (var i = childNodes.length - 1; i >= 0; i--) {
if (childNodes[i]) {
rootGroup.removeChild(childNodes[i]);
}
}
this.__currentElement = rootGroup;
//reset __groupStack as all the child group nodes are all removed.
this.__groupStack = [];
if (transform) {
this.__addTransform(transform);
}
};
/**
* "Clears" a canvas by just drawing a white rectangle in the current group.
*/
ctx.prototype.clearRect = function (x, y, width, height) {
//clear entire canvas
if (x === 0 && y === 0 && width === this.width && height === this.height) {
this.__clearCanvas();
return;
}
var rect, parent = this.__closestGroupOrSvg();
rect = this.__createElement("rect", {
x : x,
y : y,
width : width,
height : height,
fill : "#FFFFFF"
}, true);
parent.appendChild(rect);
};
/**
* Adds a linear gradient to a defs tag.
* Returns a canvas gradient object that has a reference to its parent def
*/
ctx.prototype.createLinearGradient = function (x1, y1, x2, y2) {
var grad = this.__createElement("linearGradient", {
id : randomString(this.__ids),
x1 : x1+"px",
x2 : x2+"px",
y1 : y1+"px",
y2 : y2+"px",
"gradientUnits" : "userSpaceOnUse"
}, false);
this.__defs.appendChild(grad);
return new CanvasGradient(grad, this);
};
/**
* Adds a radial gradient to a defs tag.
* Returns a canvas gradient object that has a reference to its parent def
*/
ctx.prototype.createRadialGradient = function (x0, y0, r0, x1, y1, r1) {
var grad = this.__createElement("radialGradient", {
id : randomString(this.__ids),
cx : x1+"px",
cy : y1+"px",
r : r1+"px",
fx : x0+"px",
fy : y0+"px",
"gradientUnits" : "userSpaceOnUse"
}, false);
this.__defs.appendChild(grad);
return new CanvasGradient(grad, this);
};
/**
* Parses the font string and returns svg mapping
* @private
*/
ctx.prototype.__parseFont = function () {
var regex = /^\s*(?=(?:(?:[-a-z]+\s*){0,2}(italic|oblique))?)(?=(?:(?:[-a-z]+\s*){0,2}(small-caps))?)(?=(?:(?:[-a-z]+\s*){0,2}(bold(?:er)?|lighter|[1-9]00))?)(?:(?:normal|\1|\2|\3)\s*){0,3}((?:xx?-)?(?:small|large)|medium|smaller|larger|[.\d]+(?:\%|in|[cem]m|ex|p[ctx]))(?:\s*\/\s*(normal|[.\d]+(?:\%|in|[cem]m|ex|p[ctx])))?\s*([-,\'\"\sa-z0-9]+?)\s*$/i;
var fontPart = regex.exec( this.font );
var data = {
style : fontPart[1] || 'normal',
size : fontPart[4] || '10px',
family : fontPart[6] || 'sans-serif',
weight: fontPart[3] || 'normal',
decoration : fontPart[2] || 'normal',
href : null
};
//canvas doesn't support underline natively, but we can pass this attribute
if (this.__fontUnderline === "underline") {
data.decoration = "underline";
}
//canvas also doesn't support linking, but we can pass this as well
if (this.__fontHref) {
data.href = this.__fontHref;
}
return data;
};
/**
* Helper to link text fragments
* @param font
* @param element
* @return {*}
* @private
*/
ctx.prototype.__wrapTextLink = function (font, element) {
if (font.href) {
var a = this.__createElement("a");
a.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href", font.href);
a.appendChild(element);
return a;
}
return element;
};
/**
* Fills or strokes text
* @param text
* @param x
* @param y
* @param action - stroke or fill
* @private
*/
ctx.prototype.__applyText = function (text, x, y, action) {
var font = this.__parseFont(),
parent = this.__closestGroupOrSvg(),
textElement = this.__createElement("text", {
"font-family" : font.family,
"font-size" : font.size,
"font-style" : font.style,
"font-weight" : font.weight,
"text-decoration" : font.decoration,
"x" : x,
"y" : y,
"text-anchor": getTextAnchor(this.textAlign),
"dominant-baseline": getDominantBaseline(this.textBaseline)
}, true);
textElement.appendChild(this.__document.createTextNode(text));
this.__currentElement = textElement;
this.__applyStyleToCurrentElement(action);
parent.appendChild(this.__wrapTextLink(font,textElement));
};
/**
* Creates a text element
* @param text
* @param x
* @param y
*/
ctx.prototype.fillText = function (text, x, y) {
this.__applyText(text, x, y, "fill");
};
/**
* Strokes text
* @param text
* @param x
* @param y
*/
ctx.prototype.strokeText = function (text, x, y) {
this.__applyText(text, x, y, "stroke");
};
/**
* No need to implement this for svg.
* @param text
* @return {TextMetrics}
*/
ctx.prototype.measureText = function (text) {
this.__ctx.font = this.font;
return this.__ctx.measureText(text);
};
/**
* Arc command!
*/
ctx.prototype.arc = function (x, y, radius, startAngle, endAngle, counterClockwise) {
// in canvas no circle is drawn if no angle is provided.
if (startAngle === endAngle) {
return;
}
startAngle = startAngle % (2*Math.PI);
endAngle = endAngle % (2*Math.PI);
if (startAngle === endAngle) {
//circle time! subtract some of the angle so svg is happy (svg elliptical arc can't draw a full circle)
endAngle = ((endAngle + (2*Math.PI)) - 0.001 * (counterClockwise ? -1 : 1)) % (2*Math.PI);
}
var endX = x+radius*Math.cos(endAngle),
endY = y+radius*Math.sin(endAngle),
startX = x+radius*Math.cos(startAngle),
startY = y+radius*Math.sin(startAngle),
sweepFlag = counterClockwise ? 0 : 1,
largeArcFlag = 0,
diff = endAngle - startAngle;
// https://github.com/gliffy/canvas2svg/issues/4
if (diff < 0) {
diff += 2*Math.PI;
}
if (counterClockwise) {
largeArcFlag = diff > Math.PI ? 0 : 1;
} else {
largeArcFlag = diff > Math.PI ? 1 : 0;
}
this.lineTo(startX, startY);
this.__addPathCommand(format("A {rx} {ry} {xAxisRotation} {largeArcFlag} {sweepFlag} {endX} {endY}",
{rx:radius, ry:radius, xAxisRotation:0, largeArcFlag:largeArcFlag, sweepFlag:sweepFlag, endX:endX, endY:endY}));
this.__currentPosition = {x: endX, y: endY};
};
/**
* Generates a ClipPath from the clip command.
*/
ctx.prototype.clip = function () {
var group = this.__closestGroupOrSvg(),
clipPath = this.__createElement("clipPath"),
id = randomString(this.__ids),
newGroup = this.__createElement("g");
this.__applyCurrentDefaultPath();
group.removeChild(this.__currentElement);
clipPath.setAttribute("id", id);
clipPath.appendChild(this.__currentElement);
this.__defs.appendChild(clipPath);
//set the clip path to this group
group.setAttribute("clip-path", format("url(#{id})", {id:id}));
//clip paths can be scaled and transformed, we need to add another wrapper group to avoid later transformations
// to this path
group.appendChild(newGroup);
this.__currentElement = newGroup;
};
/**
* Draws a canvas, image or mock context to this canvas.
* Note that all svg dom manipulation uses node.childNodes rather than node.children for IE support.
* http://www.whatwg.org/specs/web-apps/current-work/multipage/the-canvas-element.html#dom-context-2d-drawimage
*/
ctx.prototype.drawImage = function () {
//convert arguments to a real array
var args = Array.prototype.slice.call(arguments),
image=args[0],
dx, dy, dw, dh, sx=0, sy=0, sw, sh, parent, svg, defs, group,
currentElement, svgImage, canvas, context, id;
if (args.length === 3) {
dx = args[1];
dy = args[2];
sw = image.width;
sh = image.height;
dw = sw;
dh = sh;
} else if (args.length === 5) {
dx = args[1];
dy = args[2];
dw = args[3];
dh = args[4];
sw = image.width;
sh = image.height;
} else if (args.length === 9) {
sx = args[1];
sy = args[2];
sw = args[3];
sh = args[4];
dx = args[5];
dy = args[6];
dw = args[7];
dh = args[8];
} else {
throw new Error("Invalid number of arguments passed to drawImage: " + arguments.length);
}
parent = this.__closestGroupOrSvg();
currentElement = this.__currentElement;
var translateDirective = "translate(" + dx + ", " + dy + ")";
if (image instanceof ctx) {
//canvas2svg mock canvas context. In the future we may want to clone nodes instead.
//also I'm currently ignoring dw, dh, sw, sh, sx, sy for a mock context.
svg = image.getSvg().cloneNode(true);
if (svg.childNodes && svg.childNodes.length > 1) {
defs = svg.childNodes[0];
while(defs.childNodes.length) {
id = defs.childNodes[0].getAttribute("id");
this.__ids[id] = id;
this.__defs.appendChild(defs.childNodes[0]);
}
group = svg.childNodes[1];
if (group) {
//save original transform
var originTransform = group.getAttribute("transform");
var transformDirective;
if (originTransform) {
transformDirective = originTransform+" "+translateDirective;
} else {
transformDirective = translateDirective;
}
group.setAttribute("transform", transformDirective);
parent.appendChild(group);
}
}
} else if (image.nodeName === "CANVAS" || image.nodeName === "IMG") {
//canvas or image
svgImage = this.__createElement("image");
svgImage.setAttribute("width", dw);
svgImage.setAttribute("height", dh);
svgImage.setAttribute("preserveAspectRatio", "none");
if (sx || sy || sw !== image.width || sh !== image.height) {
//crop the image using a temporary canvas
canvas = this.__document.createElement("canvas");
canvas.width = dw;
canvas.height = dh;
context = canvas.getContext("2d");
context.drawImage(image, sx, sy, sw, sh, 0, 0, dw, dh);
image = canvas;
}
svgImage.setAttribute("transform", translateDirective);
svgImage.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href",
image.nodeName === "CANVAS" ? image.toDataURL() : image.getAttribute("src"));
parent.appendChild(svgImage);
}
};
/**
* Generates a pattern tag
*/
ctx.prototype.createPattern = function (image, repetition) {
var pattern = this.__document.createElementNS("http://www.w3.org/2000/svg", "pattern"), id = randomString(this.__ids),
img;
pattern.setAttribute("id", id);
pattern.setAttribute("width", image.width);
pattern.setAttribute("height", image.height);
if (image.nodeName === "CANVAS" || image.nodeName === "IMG") {
img = this.__document.createElementNS("http://www.w3.org/2000/svg", "image");
img.setAttribute("width", image.width);
img.setAttribute("height", image.height);
img.setAttributeNS("http://www.w3.org/1999/xlink", "xlink:href",
image.nodeName === "CANVAS" ? image.toDataURL() : image.getAttribute("src"));
pattern.appendChild(img);
this.__defs.appendChild(pattern);
} else if (image instanceof ctx) {
pattern.appendChild(image.__root.childNodes[1]);
this.__defs.appendChild(pattern);
}
return new CanvasPattern(pattern, this);
};
ctx.prototype.setLineDash = function (dashArray) {
if (dashArray && dashArray.length > 0) {
this.lineDash = dashArray.join(",");
} else {
this.lineDash = null;
}
};
/**
* Not yet implemented
*/
ctx.prototype.drawFocusRing = function () {};
ctx.prototype.createImageData = function () {};
ctx.prototype.getImageData = function () {};
ctx.prototype.putImageData = function () {};
ctx.prototype.globalCompositeOperation = function () {};
ctx.prototype.setTransform = function () {};
//add options for alternative namespace
if (typeof window === "object") {
window.C2S = ctx;
}
// CommonJS/Browserify
if (typeof module === "object" && typeof module.exports === "object") {
module.exports = ctx;
}
}());
| getDominantBaseline |
unban_user.rs | //! [POST /_matrix/client/r0/rooms/{roomId}/unban](https://matrix.org/docs/spec/client_server/r0.6.1#post-matrix-client-r0-rooms-roomid-unban)
use ruma_api::ruma_api;
use ruma_identifiers::{RoomId, UserId};
ruma_api! {
metadata: {
description: "Unban a user from a room.",
method: POST,
name: "unban_user",
path: "/_matrix/client/r0/rooms/:room_id/unban",
rate_limited: false,
authentication: AccessToken,
}
request: {
/// The room to unban the user from.
#[ruma_api(path)]
pub room_id: &'a RoomId,
/// The user to unban.
pub user_id: &'a UserId,
/// Optional reason for unbanning the user.
#[cfg(feature = "unstable-pre-spec")]
#[serde(skip_serializing_if = "Option::is_none")]
pub reason: Option<&'a str>,
}
#[derive(Default)]
response: {}
error: crate::Error
}
impl<'a> Request<'a> {
/// Creates a new `Request` with the given room id and user id.
pub fn new(room_id: &'a RoomId, user_id: &'a UserId) -> Self |
}
impl Response {
/// Creates an empty `Response`.
pub fn new() -> Self {
Self {}
}
}
| {
Self {
room_id,
user_id,
#[cfg(feature = "unstable-pre-spec")]
reason: None,
}
} |
ex9_special_pythagorean_triplet.py | """
Special Pythagorean triplet.
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a**2 + b**2 = c**2
For example, 3**2 + 4**2 = 9 + 16 = 25 = 5**2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
from datetime import datetime
from functools import wraps
def time_delta(func):
|
def gen_num():
n = 1
while True:
yield n
n += 1
@time_delta
def pythagorean_triplet(r):
for b in gen_num():
for a in range(1, b):
c = (a**2 + b**2)**(1/2)
if c % 1 == 0:
if (a+b+int(c)) == r:
return a, b, int(c)
a, b, c = pythagorean_triplet(1000)
res = a * b * c
print(res) | @wraps(func)
def inner(*args):
t_init = datetime.now()
ret = func(*args)
t_final = datetime.now() - t_init
print(f'{t_final.seconds}s | {t_final.microseconds}us')
return ret
return inner |
models.py | # -*- coding: utf-8 -*-
from django.db import models
from ..core.models import TimeStampedModel, ModelStatus
from ..edificios.models import Edificios
class Contactos(TimeStampedModel, ModelStatus):
"""
Contacts of a building
"""
edificio = models.ForeignKey(Edificios, verbose_name='Edificio')
nombre = models.CharField(blank=False,
null=False,
verbose_name='Nombre completo',
max_length=150)
piso = models.IntegerField(blank=True,
verbose_name='Piso donde vive',
help_text='Piso del edificio, ejemplo: 1, 3, 5, 10')
departamento = models.CharField(blank=True, max_length=2, help_text='Ej: A, B, D')
telefono = models.CharField(max_length=50, blank=True)
comentario = models.TextField(blank=True)
def __unicode__(self):
return self.nombre
class Meta:
verbose_name = 'Contáctos'
verbose_name_plural = 'Contáctos'
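# Illustrative query sketch (comment added; "some_edificio" is a placeholder):
# Contactos.objects.filter(edificio=some_edificio, piso=3) would list the
# third-floor contacts of a building.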
class HorariosContactos(TimeStampedModel):
"""
Model to record the availability hours
of a building's contacts
"""
contacto = models.ForeignKey(Contactos)
lunes = models.BooleanField(default=False, verbose_name='Lunes')
martes = models.BooleanField(default=False, verbose_name='Martes')
miercoles = models.BooleanField(default=False, verbose_name='Miércoles')
jueves = models.BooleanField(default=False, verbose_name='Jueves')
viernes = models.BooleanField(default=False, verbose_name='Viernes')
sabado = models.BooleanField(default=False, verbose_name='Sábado')
domingo = models.BooleanField(default=False, verbose_name='Domingo')
hora_desde = models.TimeField(blank=False, null=False)
hora_hasta = models.TimeField(blank=False, null=False)
def __unicode__(self):
return self.contacto.nombre
class Meta:
verbo | se_name = 'Horarios de contácto'
verbose_name_plural = 'Horarios de contácto'
|
|
controller.go | /*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by injection-gen. DO NOT EDIT.
package revision
import (
context "context"
fmt "fmt"
reflect "reflect"
strings "strings"
corev1 "k8s.io/api/core/v1"
labels "k8s.io/apimachinery/pkg/labels"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
scheme "k8s.io/client-go/kubernetes/scheme"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
record "k8s.io/client-go/tools/record"
kubeclient "knative.dev/pkg/client/injection/kube/client"
controller "knative.dev/pkg/controller"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
versionedscheme "knative.dev/serving/pkg/client/clientset/versioned/scheme"
client "knative.dev/serving/pkg/client/injection/client"
revision "knative.dev/serving/pkg/client/injection/informers/serving/v1/revision"
)
const (
defaultControllerAgentName = "revision-controller"
defaultFinalizerName = "revisions.serving.knative.dev"
)
// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.Options to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
logger := logging.FromContext(ctx)
// Check the options function input. It should be 0 or 1.
if len(optionsFns) > 1 {
logger.Fatalf("up to one options function is supported, found %d", len(optionsFns))
}
revisionInformer := revision.Get(ctx)
lister := revisionInformer.Lister()
rec := &reconcilerImpl{
LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {
all, err := lister.List(labels.Everything())
if err != nil {
return err
}
for _, elt := range all {
// TODO: Consider letting users specify a filter in options.
enq(bkt, types.NamespacedName{
Namespace: elt.GetNamespace(),
Name: elt.GetName(),
})
}
return nil
},
},
Client: client.Get(ctx),
Lister: lister,
reconciler: r,
finalizerName: defaultFinalizerName,
}
t := reflect.TypeOf(r).Elem()
queueName := fmt.Sprintf("%s.%s", strings.ReplaceAll(t.PkgPath(), "/", "-"), t.Name())
impl := controller.NewImpl(rec, logger, queueName)
agentName := defaultControllerAgentName
// Pass impl to the options. Save any optional results.
for _, fn := range optionsFns {
opts := fn(impl)
if opts.ConfigStore != nil {
rec.configStore = opts.ConfigStore
}
if opts.FinalizerName != "" {
rec.finalizerName = opts.FinalizerName
}
if opts.AgentName != "" {
agentName = opts.AgentName
}
}
rec.Recorder = createRecorder(ctx, agentName)
return impl
}
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
logger := logging.FromContext(ctx)
recorder := controller.GetEventRecorder(ctx)
if recorder == nil {
// Create event broadcaster
logger.Debug("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
watches := []watch.Interface{
eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
eventBroadcaster.StartRecordingToSink(
&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
}
recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
go func() {
<-ctx.Done()
for _, w := range watches {
w.Stop()
}
}()
}
return recorder
}
func | () {
versionedscheme.AddToScheme(scheme.Scheme)
}
| init |
pm_zap.go | package pm_zap
import (
"context"
"fmt"
"time"
"cloud.google.com/go/pubsub"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/zap/ctxzap"
"github.com/k-yomo/pm"
pm_logging "github.com/k-yomo/pm/middleware/logging"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// SubscriptionInterceptor returns a subscription interceptor that optionally logs the subscription process.
func SubscriptionInterceptor(logger *zap.Logger, opt ...Option) pm.SubscriptionInterceptor {
opts := &options{
shouldLog: pm_logging.DefaultLogDecider,
messageProducer: DefaultMessageProducer,
timestampFormat: time.RFC3339,
}
for _, o := range opt {
o.apply(opts)
}
return func(info *pm.SubscriptionInfo, next pm.MessageHandler) pm.MessageHandler {
return func(ctx context.Context, m *pubsub.Message) error {
startTime := time.Now()
newCtx := newLoggerForProcess(ctx, logger, info, startTime, opts.timestampFormat)
err := next(newCtx, m)
if opts.shouldLog(info, err) |
return err
}
}
}
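// Illustrative sketch (comment added): building the interceptor itself only
// needs a zap logger; how it is registered depends on the pm package's
// subscriber setup, which is not shown here.
//
//	logger, _ := zap.NewProduction()
//	interceptor := SubscriptionInterceptor(logger)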
func newLoggerForProcess(ctx context.Context, logger *zap.Logger, info *pm.SubscriptionInfo, start time.Time, timestampFormat string) context.Context {
var fields []zapcore.Field
fields = append(fields, zap.String("pubsub.start_time", start.Format(timestampFormat)))
if d, ok := ctx.Deadline(); ok {
fields = append(fields, zap.String("pubsub.deadline", d.Format(timestampFormat)))
}
fields = append(fields, zap.String("pubsub.topic_id", info.TopicID), zap.String("pubsub.subscription_id", info.SubscriptionID))
return ctxzap.ToContext(ctx, logger.With(fields...))
}
| {
opts.messageProducer(
newCtx, fmt.Sprintf("finished processing message '%s'", m.ID),
err,
time.Since(startTime),
)
} |
qread.ts | import prompt = require("prompt");
prompt.delimiter = "";
prompt.message = "> ";
var queue = [];
// This is the read lib that uses Promises instead of callbacks.
export function | (name: string, message: string, silent: boolean = false): Promise<string> {
let promise = new Promise<string>((resolve, reject) => {
let schema: prompt.PromptSchema = {
properties: { }
};
schema.properties[name] = {
required: true,
description: message + ":",
hidden: silent
};
Promise.all(queue.filter(x => x !== promise)).then(() => {
prompt.start();
prompt.get(schema, (err, result) => {
if (err) {
reject(err);
} else {
resolve(result[name]);
}
queue.shift();
});
});
});
queue.unshift(promise);
return <any>promise;
} | read |
binned.rs | // The GUI parts conditioned by the "example-gui" feature
// depend on the gtk crate, which is an interface to the
// native GTK libs.
// Although, the other parts which are not conditioned
// altogether demonstrate the basic use of this crate.
extern crate waveform;
#[cfg(feature = "example-gui")]
extern crate gtk;
#[cfg(feature = "example-gui")]
extern crate gdk_pixbuf;
use waveform::{
SampleSequence,
WaveformConfig,
Color,
BinnedWaveformRenderer,
TimeRange,
};
#[cfg(feature = "example-gui")]
use gtk::{ContainerExt, Image, Inhibit, WidgetExt, Window, WindowExt, WindowType};
#[cfg(feature = "example-gui")]
use gdk_pixbuf::Pixbuf;
fn | () {
#[cfg(feature = "example-gui")]
{
if gtk::init().is_err() {
panic!("Failed to initialize gtk.");
}
}
#[cfg(feature = "example-gui")]
let window = Window::new(WindowType::Toplevel);
#[cfg(feature = "example-gui")]
{
window.set_title("A simple waveform renderer test");
window.set_default_size(800, 100);
window.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(false)
});
}
// Generate samples to show.
let mut samples: Vec<f64> = Vec::new();
for t in 0..44100 {
samples.push(
((t as f64) / 100f64 * 2f64 * 3.1415f64).sin() * ((t as f64) / 10000f64 * 2f64 * 3.1415f64).sin(),
);
}
// The renderer's config.
let config = WaveformConfig::new(
-1f64, // Minimum amplitude to show
1f64, // Maximum amplitude to show
// Foreground color
Color::Vector4(0, 0, 0, 255),
// Background color
Color::Vector4(0, 0, 0, 0)
).unwrap();
// Put a reference to the samples here along with its sample rate.
// We need to set a sample rate because it will be used
// when you specify the time range in seconds.
let ss = SampleSequence {
data: &samples[..],
sample_rate: 44100f64,
};
// Construct the renderer.
// The second argument is the bin size.
// The renderer will bin the samples, and then
// calculate the minimum and maximum amplitude values for
// each bin.
let wfg = BinnedWaveformRenderer::new(&ss, 10, config).unwrap();
// Render!
// The renderer doesn't look at the actual audio samples here.
// Instead it will use the binned min/max values calculated above,
// making the rendering quite faster.
let vec: Vec<u8> =
wfg.render_vec(TimeRange::Seconds(0.0f64, 1.0f64), (800, 100))
.unwrap();
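    // The returned Vec<u8> is assumed to be tightly packed 8-bit RGBA pixels
    // (800 * 100 * 4 bytes here); that is why the Pixbuf below is created
    // with has_alpha = true and rowstride 800 * 4. (Comment added for clarity.)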
#[cfg(feature = "example-gui")]
{
let pixbuf =
Pixbuf::new_from_vec(vec, 0, true, 8, 800, 100, 800 * 4);
let image = Image::new_from_pixbuf(Some(&pixbuf));
window.add(&image);
window.show_all();
gtk::main();
}
}
| main |
p-1919bfb3.entry.js | import{r as t,e as i,f as a,c as n,i as e,h as r,H as o,g as s}from"./p-8dbe4215.js";import{s as l}from"./p-70b25770.js";const d=class{constructor(a){t(this,a),this.bkkrPaginator=i(this,"bkkrPaginator",7),this.bkkrPageSizeChanged=i(this,"bkkrPageSizeChanged",7),this.thrPx=0,this.thrPc=0,this.didFire=!1,this.isBusy=!1,this.isLoading=!1,this.threshold="15%",this.page=1,this.infinite=!1,this.toolbar=!0,this.position="bottom",this.arrowIcon="chevron",this.statusText="/",this.loadingType="ambient",this.loadingSpinner="crescent",this.disabled=!1,this.routerDirection="forward",this.onScroll=()=>{const t=this.scrollEl;if(!t||!this.canStart())return 1;const i=t.scrollTop,a=t.offsetHeight,n=0!==this.thrPc?a*this.thrPc:this.thrPx;if(("bottom"===this.position?t.scrollHeight-i-n-a:i-n)<0){if(!this.didFire)return this.isLoading=!0,this.didFire=!0,this.page+=1,this.bkkrPaginator.emit(),3}else this.didFire=!1;return 4},this.handleClick=(t,i)=>{t.preventDefault(),this.changeIndex(i)},this.handleLenghtChange=t=>{this.lenght=t.detail.value,this.page*this.lenght>this.items&&(this.page=Math.ceil(this.items/this.lenght)),this.bkkrPageSizeChanged.emit()}}thresholdChanged(){const t=this.threshold;t.lastIndexOf("%")>-1?(this.thrPx=0,this.thrPc=parseFloat(t)/100):(this.thrPx=parseFloat(t),this.thrPc=0)}itemsChanged(){a(this)}pageChanged(){a(this)}infiniteChanged(){const t=this.infinite&&!this.disabled;t||(this.isLoading=!1,this.isBusy=!1),this.enableScrollEvents(t)}disabledChanged(){this.disabled&&(this.isLoading=!1,this.isBusy=!1,this.infinite=!1)}async connectedCallback(){const t=this.el.closest("bkkr-content");t?(this.scrollEl=await t.getScrollElement(),this.thresholdChanged(),this.infiniteChanged(),this.disabledChanged(),"top"===this.position&&n((()=>{this.scrollEl&&(this.scrollEl.scrollTop=this.scrollEl.scrollHeight-this.scrollEl.clientHeight)}))):console.error("<bkkr-paginator> must be used inside an <bkkr-content>")}onResize(){a(this)}disconnectedCallback(){this.enableScrollEvents(!1),this.scrollEl=void 0}changeIndex(t){this.isLoading||(this.isLoading=!0,this.page=t,this.bkkrPaginator.emit())}async complete(){const t=this.scrollEl;if(this.isLoading&&t&&(this.isLoading=!1,"top"===this.position)){this.isBusy=!0;const i=t.scrollHeight-t.scrollTop;requestAnimationFrame((()=>{e((()=>{const a=t.scrollHeight-i;requestAnimationFrame((()=>{n((()=>{t.scrollTop=a,this.isBusy=!1}))}))}))}))}}canStart(){return!(this.disabled||this.isBusy||!this.scrollEl||this.isLoading||!this.infinite)}enableScrollEvents(t){this.scrollEl&&(t?this.scrollEl.addEventListener("scroll",this.onScroll):this.scrollEl.removeEventListener("scroll",this.onScroll))}render(){const{disabled:t,infinite:i,items:a,href:n,lenght:e,page:s,isLoading:d,loadingType:p,loadingSpinner:c,loadingText:b,toolbar:h,arrowIcon:m,statusText:k,handleClick:f}=this,u=window.innerWidth<768,w=i?1:s*e-(e-1),v=Math.min(s*e,a),x=Math.ceil(a/e),y=w>1,z=v<a,C=[];let j=!1;for(let t=1;t<=x;t++)1===t||t===x||t>=s-1&&t<s+2?(C.push({index:t,label:t,selected:t===s||null}),j=!1):!j&&t>1&&(t>=s-1||t<s+2)&&(C.push({index:null,label:"..."}),j=!0);return 
r(o,{class:{"paginator-loading":d,"paginator-enabled":!t,"paginator-infinite":i,["paginator-loading-"+p]:!0}},r("div",{class:"paginator-content"},r("slot",null)),"spinner"===p&&r("div",{class:"paginator-loading-content"},c&&r("div",{class:"paginator-loading-spinner"},r("bkkr-spinner",{type:c})),b&&r("div",{class:"paginator-loading-text",innerHTML:l(b)})),h&&!i&&r("nav",{class:"paginator-toolbar"},!u&&r("div",{class:"paginator-toolbar-fragment"},r("small",{class:"paginator-statustext"},w," - ",v," ",k," ",a),r("bkkr-select",{value:15,search:!1,interface:"popover",onBkkrChange:t=>this.handleLenghtChange(t)},r("bkkr-select-option",{value:15},"15"),r("bkkr-select-option",{value:30},"30"),r("bkkr-select-option",{value:50},"50"))),r("div",{class:"paginator-toolbar-fragment justify-content-center justify-content-md-end"},r("bkkr-button",{fill:"clear",color:"primary",href:void 0!==n?n+(s-1):null,disabled:!y,onClick:t=>this.handleClick(t,s-1)},r("bkkr-icon",{name:m+"-left"})),C.map((t=>g(t,{href:n,handleClick:f}))),r("bkkr-button",{fill:"clear",color:"primary",href:void 0!==n?n+(s+1):null,disabled:!z,onClick:t=>this.handleClick(t,s+1)},r("bkkr-icon",{name:m+"-right"})))))}get el(){return s(this)}static get watchers(){return{threshold:["thresholdChanged"],lenght:["itemsChanged"],page:["pageChanged"],infinite:["infiniteChanged"],disabled:["disabledChanged"]}}},g=(t,{href:i,handleClick:a})=>{if(!(window.innerWidth<768)||t.selected)return r("bkkr-button",{href:i&&t.index?i+t.index:null,onClick:i=>a(i,t.index),fill:t.selected?"solid":"clear",color:"primary",disabled:null===t.index},t.label)};d.style='.paginator-enabled{--padding-top:0;--padding-start:0;--padding-end:0;--padding-bottom:0;--toolbar-padding-top:var(--bkkr-spacer, 16px);--toolbar-padding-start:0;--toolbar-padding-end:0;--toolbar-padding-bottom:0;display:block}.paginator-infinite.paginator-enabled{min-height:84px}.paginator-content{padding-left:var(--padding-start);padding-right:var(--padding-end);padding-top:var(--padding-top);padding-bottom:var(--padding-bottom);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){.paginator-content{padding-left:unset;padding-right:unset;-webkit-padding-start:var(--padding-start);padding-inline-start:var(--padding-start);-webkit-padding-end:var(--padding-end);padding-inline-end:var(--padding-end)}}.paginator-loading-ambient .paginator-content{--background:radial-gradient(var(--color-base, var(--color-primary, #3880ff))), radial-gradient(var(--color-base, var(--color-primary, #3880ff))), radial-gradient(var(--color-base, var(--color-primary, #3880ff))), radial-gradient(var(--color-base, var(--color-primary, #3880ff)));--border-radius:0.75em;border-radius:var(--border-radius);position:relative;overflow:hidden}.paginator-loading-ambient .paginator-content::before{top:0;left:50%;position:absolute;width:100%;height:100%;-webkit-transform-origin:center top;transform-origin:center top;-webkit-transition:0.2s opacity cubic-bezier(0.32, 0.72, 0, 1);transition:0.2s opacity cubic-bezier(0.32, 0.72, 0, 1);background-image:var(--background);background-repeat:no-repeat;background-position:0 0, 100% 0, 100% 100%, 0 100%;background-size:50% 50%, 50% 
50%;content:"";-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;z-index:-2;-webkit-animation-name:shimmer;animation-name:shimmer;-webkit-animation-duration:5s;animation-duration:5s;-webkit-animation-timing-function:cubic-bezier(0.32, 0.72, 0, 1);animation-timing-function:cubic-bezier(0.32, 0.72, 0, 1);-webkit-animation-iteration-count:infinite;animation-iteration-count:infinite;-webkit-animation-fill-mode:forwards;animation-fill-mode:forwards;-webkit-animation-play-state:paused;animation-play-state:paused;pointer-events:none}.paginator-loading-ambient .paginator-content::after{border-radius:var(--border-radius);top:50%;left:50%;position:absolute;width:100%;height:100%;-webkit-transform:translate(-50%, -50%);transform:translate(-50%, -50%);-webkit-transition:0.2s height cubic-bezier(0.32, 0.72, 0, 1), 0.2s -webkit-transform cubic-bezier(0.32, 0.72, 0, 1);transition:0.2s height cubic-bezier(0.32, 0.72, 0, 1), 0.2s -webkit-transform cubic-bezier(0.32, 0.72, 0, 1);transition:0.2s transform cubic-bezier(0.32, 0.72, 0, 1), 0.2s height cubic-bezier(0.32, 0.72, 0, 1);transition:0.2s transform cubic-bezier(0.32, 0.72, 0, 1), 0.2s height cubic-bezier(0.32, 0.72, 0, 1), 0.2s -webkit-transform cubic-bezier(0.32, 0.72, 0, 1);background:var(--bkkr-background-color, #fff);-webkit-filter:blur(10px);filter:blur(10px);content:"";-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;z-index:-1;pointer-events:none}@-webkit-keyframes shimmer{0%{-webkit-transform:translate(-50%, 100%) scale(1, 0);transform:translate(-50%, 100%) scale(1, 0);opacity:0}50%{-webkit-transform:translate(-50%, 0%) scale(1, 1);transform:translate(-50%, 0%) scale(1, 1);opacity:1}100%{-webkit-transform:translate(-50%, 0%) scale(1, 1);transform:translate(-50%, 0%) scale(1, 1);opacity:0}}@keyframes shimmer{0%{-webkit-transform:translate(-50%, 100%) scale(1, 0);transform:translate(-50%, 100%) scale(1, 0);opacity:0}50%{-webkit-transform:translate(-50%, 0%) scale(1, 1);transform:translate(-50%, 0%) scale(1, 1);opacity:1}100%{-webkit-transform:translate(-50%, 0%) scale(1, 1);transform:translate(-50%, 0%) scale(1, 1);opacity:0}}.paginator-loading-content{margin-left:0;margin-right:0;margin-top:0;margin-bottom:0;display:none;-ms-flex-direction:column;flex-direction:column;-ms-flex-pack:center;justify-content:center;width:100%;min-height:84px;text-align:center;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.paginator-loading-text{margin-left:32px;margin-right:32px;margin-top:4px;margin-bottom:0}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){.paginator-loading-text{margin-left:unset;margin-right:unset;-webkit-margin-start:32px;margin-inline-start:32px;-webkit-margin-end:32px;margin-inline-end:32px}}.paginator-loading>.paginator-content::before{opacity:1;-webkit-animation-play-state:running;animation-play-state:running}.paginator-loading>.paginator-content::after{width:calc(100% - 12px);height:calc(100% - 12px)}.paginator-loading>.paginator-loading-content{display:-ms-flexbox;display:flex}.paginator-toolbar{margin-left:0;margin-right:-0.9em;margin-top:0;margin-bottom:0;padding-left:var(--toolbar-padding-start);padding-right:var(--toolbar-padding-end);padding-top:var(--toolbar-padding-top);padding-bottom:var(--toolbar-padding-bottom);display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-ms-flex-pack:justify;justify-content:space-between}@supports ((-webkit-margin-start: 0) or 
(margin-inline-start: 0)) or (-webkit-margin-start: 0){.paginator-toolbar{margin-left:unset;margin-right:unset;-webkit-margin-start:0;margin-inline-start:0;-webkit-margin-end:-0.9em;margin-inline-end:-0.9em}}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){.paginator-toolbar{padding-left:unset;padding-right:unset;-webkit-padding-start:var(--toolbar-padding-start);padding-inline-start:var(--toolbar-padding-start);-webkit-padding-end:var(--toolbar-padding-end);padding-inline-end:var(--toolbar-padding-end)}}.paginator-toolbar bkkr-select{min-width:60px;font-size:0.875em}.paginator-toolbar-fragment{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;width:100%}.paginator-toolbar-fragment bkkr-button{min-width:2.8em}.paginator-toolbar-fragment .paginator-statustext{padding-right:var(--bkkr-spacer, 16px);position:relative}@supports ((-webkit-margin-start: 0) or (margin-inline-start: 0)) or (-webkit-margin-start: 0){.paginator-toolbar-fragment .paginator-statustext{padding-right:unset;-webkit-padding-end:var(--bkkr-spacer, 16px);padding-inline-end:var(--bkkr-spacer, 16px)}}.paginator-toolbar-fragment .paginator-statustext:after{top:0;right:0;position:absolute;width:0.55px;height:100%;background-color:var(--bkkr-border, rgba(var(--bkkr-text-color-rgb, 0, 0, 0), 0.1));content:""}';export{d as bkkr_paginator} |
||
hostingdotconfig.go | package atscfg
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import (
"sort"
"strconv"
"strings"
"github.com/apache/trafficcontrol/lib/go-log"
"github.com/apache/trafficcontrol/lib/go-tc"
)
const HostingConfigFileName = `hosting.config`
const HostingConfigParamConfigFile = `storage.config`
const ContentTypeHostingDotConfig = ContentTypeTextASCII
const LineCommentHostingDotConfig = LineCommentHash
const ParamDrivePrefix = "Drive_Prefix"
const ParamRAMDrivePrefix = "RAM_Drive_Prefix"
const ServerHostingDotConfigMidIncludeInactive = false
const ServerHostingDotConfigEdgeIncludeInactive = true
func MakeHostingDotConfig(
server *tc.ServerNullable,
toToolName string, // tm.toolname global parameter (TODO: cache itself?)
toURL string, // tm.url global parameter (TODO: cache itself?)
params map[string]string, // map[name]value - config file should always be storage.config
dses []tc.DeliveryServiceNullableV30,
topologies []tc.Topology,
) string | {
if server.HostName == nil || *server.HostName == "" {
return "Error: server had no host name!"
}
text := GenericHeaderComment(*server.HostName, toToolName, toURL)
nameTopologies := MakeTopologyNameMap(topologies)
lines := []string{}
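	// If a RAM drive is configured, each delivery service origin served by this
	// server gets a hostname= line pinned to the RAM volume; the catch-all
	// hostname=* line appended below routes everything else to volume 1.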
if _, ok := params[ParamRAMDrivePrefix]; ok {
nextVolume := 1
if _, ok := params[ParamDrivePrefix]; ok {
diskVolume := nextVolume
text += `# TRAFFIC OPS NOTE: volume ` + strconv.Itoa(diskVolume) + ` is the Disk volume` + "\n"
nextVolume++
}
ramVolume := nextVolume
text += `# TRAFFIC OPS NOTE: volume ` + strconv.Itoa(ramVolume) + ` is the RAM volume` + "\n"
seenOrigins := map[string]struct{}{}
for _, ds := range dses {
if ds.OrgServerFQDN == nil || ds.XMLID == nil || ds.Active == nil {
continue // TODO warn?
}
origin := *ds.OrgServerFQDN
if _, ok := seenOrigins[origin]; ok {
continue
}
if ds.Topology != nil && *ds.Topology != "" {
topology, hasTopology := nameTopologies[TopologyName(*ds.Topology)]
if hasTopology {
topoHasServer, err := topologyIncludesServerNullable(topology, server)
if err != nil {
log.Errorln("Error checking if topology has server, skipping! : " + err.Error())
topoHasServer = false
}
if !topoHasServer {
continue
}
}
}
seenOrigins[origin] = struct{}{}
origin = strings.TrimPrefix(origin, `http://`)
origin = strings.TrimPrefix(origin, `https://`)
lines = append(lines, `hostname=`+origin+` volume=`+strconv.Itoa(ramVolume)+"\n")
}
}
diskVolume := 1 // note this will actually be the RAM (RAM_Drive_Prefix) volume if there is no Drive_Prefix parameter.
lines = append(lines, `hostname=* volume=`+strconv.Itoa(diskVolume)+"\n")
sort.Strings(lines)
text += strings.Join(lines, "")
return text
} |
|
54.54d5be04.js | (window.webpackJsonp=window.webpackJsonp||[]).push([[54],{498:function(a,s,t){"use strict";t.r(s);var r=t(9),i=Object(r.a)({},(function(){var a=this,s=a.$createElement,t=a._self._c||s;return t("ContentSlotsDistributor",{attrs:{"slot-key":a.$parent.slotKey}},[t("h1",{attrs:{id:"flashair-sd-产品"}},[t("a",{staticClass:"header-anchor",attrs:{href:"#flashair-sd-产品"}},[a._v("#")]),a._v(" FlashAir SD Products")]),a._v(" "),t("h2",{attrs:{id:"可以使用-flashair-sd-卡的设备"}},[t("a",{staticClass:"header-anchor",attrs:{href:"#可以使用-flashair-sd-卡的设备"}},[a._v("#")]),a._v(" Devices that can use FlashAir SD cards")]),a._v(" "),t("p",[a._v("FlashAir SD cards contain flash memory; while fully complying with the SD card standards, they add a chip and antenna that support wireless LAN. A FlashAir card can therefore be used as a standard SD memory card in electronic devices, such as laptop computers with an SDHC™ slot.")]),a._v(" "),t("h2",{attrs:{id:"flashair-替代品"}},[t("a",{staticClass:"header-anchor",attrs:{href:"#flashair-替代品"}},[a._v("#")]),a._v(" FlashAir Alternatives")]),a._v(" "),t("p",[a._v("Very unfortunately, there is no real replacement for the FlashAir SD card. On the one hand, Toshiba suffered from accounting fraud and poor management; on the other hand, many cameras now have built-in Wi-Fi, so the market for this kind of card keeps shrinking.")])])}),[],!1,null,null,null);s.default=i.exports}}]); |
||
generate-index.js | "use strict";
// N A T I V E
import { readFile, writeFileSync } from "fs";
import path from "path";
// I M P O R T
import cwd from "cwd";
// E X P O R T
export default options => {
let { input, output, template } = options;
input = path.join(cwd(), input);
output = path.join(cwd(), output);
return new Promise((resolve, reject) =>
readFile(input, (error, buffer) => {
if (error)
return reject(error);
let html = buffer.toString("utf8");
if (template) {
Object.keys(template).forEach(key => {
const regex = new RegExp(`<%= +${key} +%>`, "g");
html = html.replace(regex, template[key]);
});
}
try {
resolve(writeFileSync(output, html, "utf8"));
} catch(writeError) {
throw(writeError); // App cannot run without this so exit forcefully
} | })
);
}; |
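// Example usage (hypothetical paths and template keys, for illustration only):
//   import generateIndex from "./generate-index";
//   generateIndex({
//     input: "src/index.template.html",
//     output: "dist/index.html",
//     template: { title: "My App" } // replaces every `<%= title %>` placeholder
//   });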
|
prefixer.rs | use std::{iter::once, mem::take};
use swc_common::{Span, DUMMY_SP};
use swc_css_ast::*;
use swc_css_utils::replace_ident;
use swc_css_visit::{VisitMut, VisitMutWith};
pub fn prefixer() -> impl VisitMut {
Prefixer::default()
}
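// A rough sketch of the transformation this visitor performs (inferred from the
// match arms below): a declaration such as
//     user-select: none;
// is rewritten to
//     -webkit-user-select: none;
//     -moz-user-select: none;
//     -ms-user-select: none;
//     user-select: none;
// with the vendor-prefixed copies emitted before the original declaration.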
#[derive(Default)]
struct | {
added: Vec<Declaration>,
}
impl Prefixer {
fn handle_cursor_image_set(
&mut self,
v: &mut Value,
second: Option<Value>,
important: Option<Span>,
) {
match v {
Value::Fn(f) => match &*f.name.value {
"image-set" => {
let val = Value::Fn(FnValue {
span: DUMMY_SP,
name: Ident {
span: DUMMY_SP,
value: "-webkit-image-set".into(),
raw: "-webkit-image-set".into(),
},
args: f.args.clone(),
});
let second = second.map(|v| match &v {
Value::Ident(t) => {
if &*t.value == "grab" {
Value::Ident(Ident {
span: t.span,
value: "-webkit-grab".into(),
raw: "-webkit-grab".into(),
})
} else {
v
}
}
_ => v,
});
self.added.push(Declaration {
span: DUMMY_SP,
property: Ident {
span: DUMMY_SP,
value: "cursor".into(),
raw: "cursor".into(),
},
value: {
let val = Value::Comma(CommaValues {
span: DUMMY_SP,
values: once(val).chain(second).collect(),
});
vec![val]
},
important,
});
}
_ => {}
},
Value::Comma(c) => {
if c.values.len() >= 1 {
let second = c.values.get(1).cloned();
self.handle_cursor_image_set(&mut c.values[0], second, important);
}
}
_ => {}
}
}
}
impl VisitMut for Prefixer {
fn visit_mut_declaration_block_items(&mut self, props: &mut Vec<DeclarationBlockItem>) {
let mut new = vec![];
for mut n in take(props) {
n.visit_mut_with(self);
new.extend(self.added.drain(..).map(DeclarationBlockItem::Declaration));
new.push(n);
}
*props = new;
}
fn visit_mut_declaration(&mut self, n: &mut Declaration) {
n.visit_mut_children_with(self);
macro_rules! simple {
($property:expr,$val:expr) => {{
let val = Value::Ident(Ident {
span: DUMMY_SP,
value: $val.into(),
raw: $val.into(),
});
self.added.push(Declaration {
span: n.span,
property: Ident {
span: n.property.span,
value: $property.into(),
raw: $property.into(),
},
value: vec![val],
important: n.important.clone(),
});
}};
}
macro_rules! same_content {
($property:expr) => {{
self.added.push(Declaration {
span: n.span,
property: Ident {
span: n.property.span,
value: $property.into(),
raw: $property.into(),
},
value: n.value.clone(),
important: n.important.clone(),
});
}};
}
macro_rules! same_name {
($property:expr) => {{
let val = Ident {
span: DUMMY_SP,
value: $property.into(),
raw: $property.into(),
};
self.added.push(Declaration {
span: n.span,
property: n.property.clone(),
value: vec![Value::Ident(val)],
important: n.important.clone(),
});
}};
}
match &*n.property.value {
"appearance" => {
same_content!("-webkit-appearance");
same_content!("-moz-appearance");
same_content!("-ms-appearance");
}
"animation" => {
same_content!("-webkit-animation");
}
"animation-duration" => {
same_content!("-webkit-animation-duration");
}
"animation-name" => {
same_content!("-webkit-animation-name");
}
"animation-iteration-count" => {
same_content!("-webkit-animation-iteration-count");
}
"animation-timing-function" => {
same_content!("-webkit-animation-timing-function");
}
"background-clip" => {
same_content!("-webkit-background-clip");
}
"box-decoration-break" => {
same_content!("-webkit-box-decoration-break");
}
"color-adjust" => {
same_content!("-webkit-print-color-adjust");
}
"columns" => {
same_content!("-webkit-columns");
}
"column-count" => {
same_content!("-webkit-column-count");
}
"column-fill" => {
same_content!("-webkit-column-fill");
}
"column-gap" => {
same_content!("-webkit-column-gap");
}
"column-rule" => {
same_content!("-webkit-column-rule");
}
"column-rule-color" => {
same_content!("-webkit-column-rule-color");
}
"column-rule-style" => {
same_content!("-webkit-column-rule-style");
}
"column-span" => {
same_content!("-webkit-column-span");
}
"column-rule-width" => {
same_content!("-webkit-column-rule-width");
}
"column-width" => {
same_content!("-webkit-column-width");
}
"background" => {
if n.value.len() >= 1 {
match &n.value[0] {
Value::Fn(f) => match &*f.name.value {
"image-set" => {
let val = Value::Fn(FnValue {
span: DUMMY_SP,
name: Ident {
span: DUMMY_SP,
value: "-webkit-image-set".into(),
raw: "-webkit-image-set".into(),
},
args: f.args.clone(),
});
self.added.push(Declaration {
span: n.span,
property: n.property.clone(),
value: vec![val],
important: n.important.clone(),
});
}
_ => {}
},
_ => {}
}
}
}
"background-image" => {
if n.value.len() >= 1 {
match &n.value[0] {
Value::Fn(f) => match &*f.name.value {
"image-set" => {
let val = Value::Fn(FnValue {
span: DUMMY_SP,
name: Ident {
span: DUMMY_SP,
value: "-webkit-image-set".into(),
raw: "-webkit-image-set".into(),
},
args: f.args.clone(),
});
self.added.push(Declaration {
span: n.span,
property: n.property.clone(),
value: vec![val],
important: n.important.clone(),
});
}
_ => {}
},
_ => {}
}
}
}
"cursor" => {
if n.value.len() >= 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"grab" => {
same_name!("-webkit-grab");
}
_ => {}
},
_ => {
let second = n.value.get(1).cloned();
self.handle_cursor_image_set(&mut n.value[0], second, n.important);
}
}
}
}
"display" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"flex" => {
same_name!("-webkit-box");
same_name!("-webkit-flex");
same_name!("-ms-flexbox");
}
"inline-flex" => {
same_name!("-webkit-inline-box");
same_name!("-webkit-inline-flex");
same_name!("-ms-inline-flexbox");
}
_ => {}
},
_ => {}
}
}
}
"flex" => {
same_content!("-webkit-flex");
same_content!("-ms-flex");
}
"flex-grow" => {
same_content!("-webkit-box-flex");
same_content!("-webkit-flex-grow");
same_content!("-ms-flex-positive");
}
"flex-shrink" => {
same_content!("-webkit-flex-shrink");
same_content!("-ms-flex-negative");
}
"flex-basis" => {
same_content!("-webkit-flex-basis");
same_content!("-ms-flex-preferred-size");
}
"align-self" => {
same_content!("-webkit-align-self");
same_content!("-ms-flex-item-align");
}
"align-content" => {
same_content!("-webkit-align-content");
same_content!("-ms-flex-line-pack");
}
"align-items" => {
same_content!("-webkit-align-items");
same_content!("-webkit-box-align");
same_content!("-ms-flex-align");
}
"justify-content" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"flex-end" => {
simple!("-webkit-box-pack", "end");
simple!("-ms-flex-pack", "end");
}
"flex-start" => {
simple!("-webkit-box-pack", "start");
simple!("-ms-flex-pack", "start");
}
"justify" => {
same_content!("-webkit-box-pack");
same_content!("-ms-flex-pack");
}
"space-between" => {
simple!("-webkit-box-pack", "justify");
}
_ => {}
},
_ => {}
}
}
same_content!("-webkit-justify-content");
}
"order" => {
same_content!("-webkit-order");
same_content!("-ms-flex-order");
}
"flex-direction" => {
same_content!("-webkit-flex-direction");
same_content!("-ms-flex-direction");
}
"filter" => {
same_content!("-webkit-filter");
}
"mask" => {
same_content!("-webkit-mask");
}
"mask-image" => {
same_content!("-webkit-mask-image");
}
"mask-mode" => {
same_content!("-webkit-mask-mode");
}
"mask-clip" => {
same_content!("-webkit-mask-clip");
}
"mask-size" => {
same_content!("-webkit-mask-size");
}
"mask-repeat" => {
same_content!("-webkit-mask-repeat");
}
"mask-origin" => {
same_content!("-webkit-mask-origin");
}
"mask-position" => {
same_content!("-webkit-mask-position");
}
"mask-composite" => {
same_content!("-webkit-mask-composite");
}
"margin-inline-start" => {
same_content!("-webkit-margin-start");
}
"margin-inline-end" => {
same_content!("-webkit-margin-end");
}
"backface-visibility" => {
same_content!("-webkit-backface-visibility");
}
"clip-path" => {
same_content!("-webkit-clip-path");
}
"position" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"sticky" => {
same_name!("-webkit-sticky");
}
_ => {}
},
_ => {}
}
}
}
"user-select" => {
same_content!("-webkit-user-select");
same_content!("-moz-user-select");
same_content!("-ms-user-select");
}
"transform" => {
same_content!("-webkit-transform");
same_content!("-moz-transform");
same_content!("-ms-transform");
}
"text-decoration" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"none" => {
same_content!("-webkit-text-decoration");
}
_ => {}
},
_ => {}
}
}
}
"text-size-adjust" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"none" => {
same_content!("-webkit-text-size-adjust");
same_content!("-moz-text-size-adjust");
same_content!("-ms-text-size-adjust");
}
_ => {}
},
_ => {}
}
}
}
"transition" => {
let mut value = n.value.clone();
replace_ident(&mut value, "transform", "-webkit-transform");
self.added.push(Declaration {
span: n.span,
property: Ident {
span: n.property.span,
value: "-webkit-transition".into(),
raw: "-webkit-transition".into(),
},
value,
important: n.important.clone(),
});
}
"writing-mode" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"none" => {
same_content!("-webkit-writing-mode");
same_content!("-ms-writing-mode");
}
"vertical-lr" | "sideways-lr" => {
same_content!("-webkit-writing-mode");
simple!("-ms-writing-mode", "tb");
}
"vertical-rl" | "sideways-rl" => {
same_content!("-webkit-writing-mode");
simple!("-ms-writing-mode", "tb-rl");
}
"horizontal-tb" => {
same_content!("-webkit-writing-mode");
simple!("-ms-writing-mode", "lr");
}
_ => {}
},
_ => {}
}
}
}
"min-width" | "width" | "max-width" | "min-height" | "height" | "max-height"
| "min-block-size" | "min-inline-size" => {
if n.value.len() == 1 {
match &n.value[0] {
Value::Ident(Ident { value, .. }) => match &**value {
"fit-content" => {
same_name!("-webkit-fit-content");
same_name!("-moz-fit-content");
}
"max-content" => {
same_name!("-webkit-max-content");
same_name!("-moz-max-content");
}
"min-content" => {
same_name!("-webkit-min-content");
same_name!("-moz-min-content");
}
"fill-available" => {
same_name!("-webkit-fill-available");
same_name!("-moz-available");
}
"stretch" => {
same_name!("-webkit-fill-available");
same_name!("-moz-available");
same_name!("fill-available");
}
_ => {}
},
_ => {}
}
}
}
_ => {}
}
}
}
| Prefixer |
03_finding_images.py | '''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/GetStarted/03_finding_images.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/03_finding_images.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=GetStarted/03_finding_images.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/03_finding_images.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically installs its dependencies, including earthengine-api and folium.
'''
# %%
import subprocess
try:
import geehydro
except ImportError:
print('geehydro package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize the Earth Engine API. You only need to authenticate once.
'''
# %%
try:
ee.Initialize()
except Exception:
ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1')
point = ee.Geometry.Point(-122.262, 37.8719)
start = ee.Date('2014-06-01')
finish = ee.Date('2014-10-01')
filteredCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1') \
.filterBounds(point) \
.filterDate(start, finish) \
.sort('CLOUD_COVER', True)
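# 'CLOUD_COVER' is sorted in ascending order (True), so first() below
# returns the least cloudy scene in the filtered collection.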
first = filteredCollection.first()
# Define visualization parameters in an object literal.
vizParams = {'bands': ['B5', 'B4', 'B3'],
'min': 5000, 'max': 15000, 'gamma': 1.3}
Map.addLayer(first, vizParams, 'Landsat 8 image')
# Load a feature collection.
featureCollection = ee.FeatureCollection('TIGER/2016/States')
# Filter the collection.
filteredFC = featureCollection.filter(ee.Filter.eq('NAME', 'California'))
# Display the collection.
Map.addLayer(ee.Image().paint(filteredFC, 0, 2),
{'palette': 'red'}, 'California')
| ## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map | # %%
''' |
users.go | package simple
import (
"context"
"github.com/sv-tools/mongoifc"
)
const (
UsersCollection = "users"
)
type User struct {
ID string `json:"id,omitempty" bson:"_id,omitempty"`
Name string `json:"name,omitempty" bson:"name,omitempty"`
Email string `json:"email,omitempty" bson:"email,omitempty"`
Active bool `json:"active,omitempty" bson:"active,omitempty"`
IsAdmin bool `json:"is_admin,omitempty" bson:"is_admin,omitempty"`
}
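// Because every field carries an `omitempty` bson tag, a partially populated
// User value doubles as a Find filter (e.g. {active: true, is_admin: true}),
// which is how the query below selects active admins.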
func | (ctx context.Context, db mongoifc.Database) ([]*User, error) {
var users []*User
cur, err := db.Collection(UsersCollection).Find(ctx, User{
Active: true,
IsAdmin: true,
})
if err != nil {
return nil, err
}
if err := cur.All(ctx, &users); err != nil {
return nil, err
}
	return users, nil
}
| GetAdmins |
query_executor.go | // Copyright 2014, Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tabletserver
import (
"fmt"
"time"
log "github.com/golang/glog"
"github.com/youtube/vitess/go/hack"
mproto "github.com/youtube/vitess/go/mysql/proto"
"github.com/youtube/vitess/go/sqltypes"
"github.com/youtube/vitess/go/vt/callinfo"
"github.com/youtube/vitess/go/vt/schema"
"github.com/youtube/vitess/go/vt/sqlparser"
"github.com/youtube/vitess/go/vt/tabletserver/planbuilder"
"golang.org/x/net/context"
)
// QueryExecutor is used for executing a query request.
type QueryExecutor struct {
query string
bindVars map[string]interface{}
transactionID int64
plan *ExecPlan
ctx context.Context
logStats *SQLQueryStats
qe *QueryEngine
}
// poolConn is the interface implemented by users of this specialized pool.
type poolConn interface {
Exec(ctx context.Context, query string, maxrows int, wantfields bool) (*mproto.QueryResult, error)
}
// Execute performs a non-streaming query execution.
func (qre *QueryExecutor) Execute() (reply *mproto.QueryResult, err error) {
qre.logStats.OriginalSql = qre.query
qre.logStats.BindVariables = qre.bindVars
qre.logStats.TransactionID = qre.transactionID
planName := qre.plan.PlanId.String()
qre.logStats.PlanType = planName
defer func(start time.Time) {
duration := time.Now().Sub(start)
qre.qe.queryServiceStats.QueryStats.Add(planName, duration)
if reply == nil {
qre.plan.AddStats(1, duration, 0, 1)
return
}
qre.plan.AddStats(1, duration, int64(reply.RowsAffected), 0)
qre.logStats.RowsAffected = int(reply.RowsAffected)
qre.logStats.Rows = reply.Rows
qre.qe.queryServiceStats.ResultStats.Add(int64(len(reply.Rows)))
}(time.Now())
if err := qre.checkPermissions(); err != nil {
return nil, err
}
if qre.plan.PlanId == planbuilder.PLAN_DDL {
return qre.execDDL()
}
if qre.transactionID != 0 {
// Need upfront connection for DMLs and transactions
conn := qre.qe.txPool.Get(qre.transactionID)
defer conn.Recycle()
conn.RecordQuery(qre.query)
var invalidator CacheInvalidator
if qre.plan.TableInfo != nil && qre.plan.TableInfo.CacheType != schema.CACHE_NONE {
invalidator = conn.DirtyKeys(qre.plan.TableName)
}
switch qre.plan.PlanId {
case planbuilder.PLAN_PASS_DML:
if qre.qe.strictMode.Get() != 0 {
return nil, NewTabletError(ErrFail, "DML too complex")
}
reply, err = qre.directFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
case planbuilder.PLAN_INSERT_PK:
reply, err = qre.execInsertPK(conn)
case planbuilder.PLAN_INSERT_SUBQUERY:
reply, err = qre.execInsertSubquery(conn)
case planbuilder.PLAN_DML_PK:
reply, err = qre.execDMLPK(conn, invalidator)
case planbuilder.PLAN_DML_SUBQUERY:
reply, err = qre.execDMLSubquery(conn, invalidator)
case planbuilder.PLAN_OTHER:
reply, err = qre.execSQL(conn, qre.query, true)
default: // select or set in a transaction, just count as select
reply, err = qre.execDirect(conn)
}
} else {
switch qre.plan.PlanId {
case planbuilder.PLAN_PASS_SELECT:
if qre.plan.Reason == planbuilder.REASON_LOCK {
return nil, NewTabletError(ErrFail, "Disallowed outside transaction")
}
reply, err = qre.execSelect()
case planbuilder.PLAN_PK_IN:
reply, err = qre.execPKIN()
case planbuilder.PLAN_SELECT_SUBQUERY:
reply, err = qre.execSubquery()
case planbuilder.PLAN_SET:
reply, err = qre.execSet()
case planbuilder.PLAN_OTHER:
conn, connErr := qre.getConn(qre.qe.connPool)
if connErr != nil {
return nil, connErr
}
defer conn.Recycle()
reply, err = qre.execSQL(conn, qre.query, true)
default:
if !qre.qe.enableAutoCommit {
return nil, NewTabletError(ErrFatal, "unsupported query: %s", qre.query)
}
reply, err = qre.execDmlAutoCommit()
}
}
return reply, err
}
// Stream performs a streaming query execution.
func (qre *QueryExecutor) Stream(sendReply func(*mproto.QueryResult) error) error {
qre.logStats.OriginalSql = qre.query
qre.logStats.PlanType = qre.plan.PlanId.String()
defer qre.qe.queryServiceStats.QueryStats.Record(qre.plan.PlanId.String(), time.Now())
if err := qre.checkPermissions(); err != nil {
return err
}
conn, err := qre.getConn(qre.qe.streamConnPool)
if err != nil {
return err
}
defer conn.Recycle()
qd := NewQueryDetail(qre.logStats.ctx, conn)
qre.qe.streamQList.Add(qd)
defer qre.qe.streamQList.Remove(qd)
return qre.fullStreamFetch(conn, qre.plan.FullQuery, qre.bindVars, nil, sendReply)
}
func (qre *QueryExecutor) execDmlAutoCommit() (reply *mproto.QueryResult, err error) {
transactionID := qre.qe.txPool.Begin(qre.ctx)
qre.logStats.AddRewrittenSql("begin", time.Now())
defer func() {
// TxPool.Get may panic
if panicErr := recover(); panicErr != nil {
err = fmt.Errorf("DML autocommit got panic: %v", panicErr)
}
if err != nil {
qre.qe.txPool.Rollback(qre.ctx, transactionID)
qre.logStats.AddRewrittenSql("rollback", time.Now())
} else {
qre.qe.Commit(qre.ctx, qre.logStats, transactionID)
qre.logStats.AddRewrittenSql("commit", time.Now())
}
}()
conn := qre.qe.txPool.Get(transactionID)
defer conn.Recycle()
var invalidator CacheInvalidator
if qre.plan.TableInfo != nil && qre.plan.TableInfo.CacheType != schema.CACHE_NONE {
invalidator = conn.DirtyKeys(qre.plan.TableName)
}
switch qre.plan.PlanId {
case planbuilder.PLAN_PASS_DML:
if qre.qe.strictMode.Get() != 0 {
return nil, NewTabletError(ErrFail, "DML too complex")
}
reply, err = qre.directFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
case planbuilder.PLAN_INSERT_PK:
reply, err = qre.execInsertPK(conn)
case planbuilder.PLAN_INSERT_SUBQUERY:
reply, err = qre.execInsertSubquery(conn)
case planbuilder.PLAN_DML_PK:
reply, err = qre.execDMLPK(conn, invalidator)
case planbuilder.PLAN_DML_SUBQUERY:
reply, err = qre.execDMLSubquery(conn, invalidator)
default:
return nil, NewTabletError(ErrFatal, "unsupported query: %s", qre.query)
}
return reply, err
}
// checkPermissions enforces query rules, the exempt-ACL superuser bypass, and
// table ACLs; it returns an error if the query is disallowed.
func (qre *QueryExecutor) checkPermissions() error {
// Skip permissions check if we have a background context.
if qre.ctx == context.Background() {
return nil
}
// Blacklist
remoteAddr := ""
username := ""
ci, ok := callinfo.FromContext(qre.ctx)
if ok {
remoteAddr = ci.RemoteAddr()
username = ci.Username()
}
action, desc := qre.plan.Rules.getAction(remoteAddr, username, qre.bindVars)
switch action {
case QR_FAIL:
return NewTabletError(ErrFail, "Query disallowed due to rule: %s", desc)
case QR_FAIL_RETRY:
return NewTabletError(ErrRetry, "Query disallowed due to rule: %s", desc)
}
	// A superuser is exempt from table ACL checking.
if qre.qe.exemptACL == username {
qre.qe.tableaclExemptCount.Add(1)
return nil
}
tableACLStatsKey := []string{
qre.plan.TableName,
// TODO(shengzhe): use table group instead of username.
username,
qre.plan.PlanId.String(),
username,
}
if qre.plan.Authorized == nil {
return NewTabletError(ErrFail, "table acl error: nil acl")
}
// perform table ACL check if it is enabled.
if !qre.plan.Authorized.IsMember(username) {
if qre.qe.enableTableAclDryRun {
qre.qe.tableaclPseudoDenied.Add(tableACLStatsKey, 1)
return nil
}
// raise error if in strictTableAcl mode, else just log an error.
if qre.qe.strictTableAcl {
errStr := fmt.Sprintf("table acl error: %q cannot run %v on table %q", username, qre.plan.PlanId, qre.plan.TableName)
qre.qe.tableaclDenied.Add(tableACLStatsKey, 1)
qre.qe.accessCheckerLogger.Errorf("%s", errStr)
return NewTabletError(ErrFail, "%s", errStr)
}
return nil
}
qre.qe.tableaclAllowed.Add(tableACLStatsKey, 1)
return nil
}
func (qre *QueryExecutor) execDDL() (*mproto.QueryResult, error) {
ddlPlan := planbuilder.DDLParse(qre.query)
if ddlPlan.Action == "" {
return nil, NewTabletError(ErrFail, "DDL is not understood")
}
txid := qre.qe.txPool.Begin(qre.ctx)
defer qre.qe.txPool.SafeCommit(qre.ctx, txid)
// Stolen from Execute
conn := qre.qe.txPool.Get(txid)
defer conn.Recycle()
result, err := qre.execSQL(conn, qre.query, false)
if err != nil {
return nil, err
}
if ddlPlan.TableName != "" && ddlPlan.TableName != ddlPlan.NewName {
// It's a drop or rename.
qre.qe.schemaInfo.DropTable(ddlPlan.TableName)
}
if ddlPlan.NewName != "" {
qre.qe.schemaInfo.CreateOrUpdateTable(qre.ctx, ddlPlan.NewName)
}
return result, nil
}
func (qre *QueryExecutor) execPKIN() (*mproto.QueryResult, error) {
pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
if err != nil {
return nil, err
}
limit, err := getLimit(qre.plan.Limit, qre.bindVars)
if err != nil {
return nil, err
}
return qre.fetchMulti(pkRows, limit)
}
func (qre *QueryExecutor) execSubquery() (*mproto.QueryResult, error) {
innerResult, err := qre.qFetch(qre.logStats, qre.plan.Subquery, qre.bindVars)
if err != nil {
return nil, err
}
return qre.fetchMulti(innerResult.Rows, -1)
}
func (qre *QueryExecutor) fetchMulti(pkRows [][]sqltypes.Value, limit int64) (*mproto.QueryResult, error) {
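	// Rowcache read-through: rows found in the cache are served directly
	// (optionally spot-checked against MySQL); the misses are fetched from
	// MySQL in a single query and written back into the cache.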
if qre.plan.Fields == nil {
return nil, NewTabletError(ErrFatal, "query plan.Fields is empty")
}
result := &mproto.QueryResult{Fields: qre.plan.Fields}
if len(pkRows) == 0 || limit == 0 {
return result, nil
}
tableInfo := qre.plan.TableInfo
keys := make([]string, len(pkRows))
for i, pk := range pkRows {
keys[i] = buildKey(pk)
}
rcresults := tableInfo.Cache.Get(qre.ctx, keys)
rows := make([][]sqltypes.Value, 0, len(pkRows))
missingRows := make([][]sqltypes.Value, 0, len(pkRows))
var hits, absent, misses int64
for i, pk := range pkRows {
rcresult := rcresults[keys[i]]
if rcresult.Row != nil {
if qre.mustVerify() {
err := qre.spotCheck(rcresult, pk)
if err != nil {
return nil, err
}
}
rows = append(rows, applyFilter(qre.plan.ColumnNumbers, rcresult.Row))
hits++
} else {
missingRows = append(missingRows, pk)
}
}
if len(missingRows) != 0 {
bv := map[string]interface{}{
"#pk": sqlparser.TupleEqualityList{
Columns: qre.plan.TableInfo.Indexes[0].Columns,
Rows: missingRows,
},
}
resultFromdb, err := qre.qFetch(qre.logStats, qre.plan.OuterQuery, bv)
if err != nil {
return nil, err
}
misses = int64(len(resultFromdb.Rows))
absent = int64(len(pkRows)) - hits - misses
for _, row := range resultFromdb.Rows {
rows = append(rows, applyFilter(qre.plan.ColumnNumbers, row))
key := buildKey(applyFilter(qre.plan.TableInfo.PKColumns, row))
tableInfo.Cache.Set(qre.ctx, key, row, rcresults[key].Cas)
}
}
qre.logStats.CacheHits = hits
qre.logStats.CacheAbsent = absent
qre.logStats.CacheMisses = misses
qre.logStats.QuerySources |= QuerySourceRowcache
tableInfo.hits.Add(hits)
tableInfo.absent.Add(absent)
tableInfo.misses.Add(misses)
result.RowsAffected = uint64(len(rows))
result.Rows = rows
// limit == 0 is already addressed upfront.
if limit > 0 && len(result.Rows) > int(limit) {
result.Rows = result.Rows[:limit]
result.RowsAffected = uint64(limit)
}
return result, nil
}
func (qre *QueryExecutor) mustVerify() bool {
return (Rand() % spotCheckMultiplier) < qre.qe.spotCheckFreq.Get()
}
func (qre *QueryExecutor) spotCheck(rcresult RCResult, pk []sqltypes.Value) error {
qre.qe.queryServiceStats.SpotCheckCount.Add(1)
bv := map[string]interface{}{
"#pk": sqlparser.TupleEqualityList{
Columns: qre.plan.TableInfo.Indexes[0].Columns,
Rows: [][]sqltypes.Value{pk},
},
}
resultFromdb, err := qre.qFetch(qre.logStats, qre.plan.OuterQuery, bv)
if err != nil {
return err
}
var dbrow []sqltypes.Value
if len(resultFromdb.Rows) != 0 {
dbrow = resultFromdb.Rows[0]
}
if dbrow == nil || !rowsAreEqual(rcresult.Row, dbrow) {
qre.qe.Launch(func() { qre.recheckLater(rcresult, dbrow, pk) })
}
return nil
}
func (qre *QueryExecutor) recheckLater(rcresult RCResult, dbrow []sqltypes.Value, pk []sqltypes.Value) {
time.Sleep(10 * time.Second)
keys := make([]string, 1)
keys[0] = buildKey(pk)
reloaded := qre.plan.TableInfo.Cache.Get(context.Background(), keys)[keys[0]]
// If reloaded row is absent or has changed, we're good
if reloaded.Row == nil || reloaded.Cas != rcresult.Cas {
return
}
log.Warningf("query: %v", qre.plan.FullQuery)
log.Warningf("mismatch for: %v\ncache: %v\ndb: %v", pk, rcresult.Row, dbrow)
qre.qe.queryServiceStats.InternalErrors.Add("Mismatch", 1)
}
// execDirect always sends the query to mysql
func (qre *QueryExecutor) execDirect(conn poolConn) (*mproto.QueryResult, error) {
if qre.plan.Fields != nil {
result, err := qre.directFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
if err != nil {
return nil, err
}
result.Fields = qre.plan.Fields
return result, nil
}
return qre.fullFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
}
// execSelect sends a query to mysql only if another identical query is not running. Otherwise, it waits and
// reuses the result. If the plan is missing field info, it sends the query to mysql requesting full info.
func (qre *QueryExecutor) execSelect() (*mproto.QueryResult, error) {
if qre.plan.Fields != nil {
result, err := qre.qFetch(qre.logStats, qre.plan.FullQuery, qre.bindVars)
if err != nil {
return nil, err
}
result.Fields = qre.plan.Fields
return result, nil
}
conn, err := qre.getConn(qre.qe.connPool)
if err != nil {
return nil, err
}
defer conn.Recycle()
return qre.fullFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
}
func (qre *QueryExecutor) execInsertPK(conn poolConn) (*mproto.QueryResult, error) {
pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
if err != nil {
return nil, err
}
return qre.execInsertPKRows(conn, pkRows)
}
func (qre *QueryExecutor) execInsertSubquery(conn poolConn) (*mproto.QueryResult, error) {
innerResult, err := qre.directFetch(conn, qre.plan.Subquery, qre.bindVars, nil)
if err != nil {
return nil, err
}
innerRows := innerResult.Rows
if len(innerRows) == 0 {
return &mproto.QueryResult{RowsAffected: 0}, nil
}
if len(qre.plan.ColumnNumbers) != len(innerRows[0]) {
return nil, NewTabletError(ErrFail, "Subquery length does not match column list")
}
pkRows := make([][]sqltypes.Value, len(innerRows))
for i, innerRow := range innerRows {
pkRows[i] = applyFilterWithPKDefaults(qre.plan.TableInfo, qre.plan.SubqueryPKColumns, innerRow)
}
// Validating first row is sufficient
if err := validateRow(qre.plan.TableInfo, qre.plan.TableInfo.PKColumns, pkRows[0]); err != nil {
return nil, err
}
qre.bindVars["#values"] = innerRows
return qre.execInsertPKRows(conn, pkRows)
}
func (qre *QueryExecutor) execInsertPKRows(conn poolConn, pkRows [][]sqltypes.Value) (*mproto.QueryResult, error) {
secondaryList, err := buildSecondaryList(qre.plan.TableInfo, pkRows, qre.plan.SecondaryPKValues, qre.bindVars)
if err != nil {
return nil, err
}
bsc := buildStreamComment(qre.plan.TableInfo, pkRows, secondaryList)
return qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
} | func (qre *QueryExecutor) execDMLPK(conn poolConn, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
pkRows, err := buildValueList(qre.plan.TableInfo, qre.plan.PKValues, qre.bindVars)
if err != nil {
return nil, err
}
return qre.execDMLPKRows(conn, pkRows, invalidator)
}
func (qre *QueryExecutor) execDMLSubquery(conn poolConn, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
innerResult, err := qre.directFetch(conn, qre.plan.Subquery, qre.bindVars, nil)
if err != nil {
return nil, err
}
return qre.execDMLPKRows(conn, innerResult.Rows, invalidator)
}
func (qre *QueryExecutor) execDMLPKRows(conn poolConn, pkRows [][]sqltypes.Value, invalidator CacheInvalidator) (*mproto.QueryResult, error) {
if len(pkRows) == 0 {
return &mproto.QueryResult{RowsAffected: 0}, nil
}
secondaryList, err := buildSecondaryList(qre.plan.TableInfo, pkRows, qre.plan.SecondaryPKValues, qre.bindVars)
if err != nil {
return nil, err
}
result := &mproto.QueryResult{}
maxRows := int(qre.qe.maxDMLRows.Get())
for i := 0; i < len(pkRows); i += maxRows {
end := i + maxRows
if end >= len(pkRows) {
end = len(pkRows)
}
pkRows := pkRows[i:end]
secondaryList := secondaryList
if secondaryList != nil {
secondaryList = secondaryList[i:end]
}
bsc := buildStreamComment(qre.plan.TableInfo, pkRows, secondaryList)
qre.bindVars["#pk"] = sqlparser.TupleEqualityList{
Columns: qre.plan.TableInfo.Indexes[0].Columns,
Rows: pkRows,
}
r, err := qre.directFetch(conn, qre.plan.OuterQuery, qre.bindVars, bsc)
if err != nil {
return nil, err
}
// DMLs should only return RowsAffected.
result.RowsAffected += r.RowsAffected
}
if invalidator == nil {
return result, nil
}
for _, pk := range pkRows {
key := buildKey(pk)
invalidator.Delete(key)
}
return result, nil
}
func (qre *QueryExecutor) execSet() (*mproto.QueryResult, error) {
switch qre.plan.SetKey {
case "vt_pool_size":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_pool_size = %v, want to int64", err)
}
qre.qe.connPool.SetCapacity(int(val))
case "vt_stream_pool_size":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_stream_pool_size = %v, want int64", err)
}
qre.qe.streamConnPool.SetCapacity(int(val))
case "vt_transaction_cap":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_transaction_cap = %v, want int64", err)
}
qre.qe.txPool.pool.SetCapacity(int(val))
case "vt_transaction_timeout":
val, err := parseDuration(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_transaction_timeout = %v, want int64 or float64", err)
}
qre.qe.txPool.SetTimeout(val)
case "vt_schema_reload_time":
val, err := parseDuration(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_schema_reload_time = %v, want int64 or float64", err)
}
qre.qe.schemaInfo.SetReloadTime(val)
case "vt_query_cache_size":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_query_cache_size = %v, want int64", err)
}
qre.qe.schemaInfo.SetQueryCacheSize(int(val))
case "vt_max_result_size":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_max_result_size = %v, want int64", err)
}
if val < 1 {
return nil, NewTabletError(ErrFail, "vt_max_result_size out of range %v", val)
}
qre.qe.maxResultSize.Set(val)
case "vt_max_dml_rows":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_max_dml_rows = %v, want to int64", err)
}
if val < 1 {
return nil, NewTabletError(ErrFail, "vt_max_dml_rows out of range %v", val)
}
qre.qe.maxDMLRows.Set(val)
case "vt_stream_buffer_size":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_stream_buffer_size = %v, want int64", err)
}
if val < 1024 {
return nil, NewTabletError(ErrFail, "vt_stream_buffer_size out of range %v", val)
}
qre.qe.streamBufferSize.Set(val)
case "vt_query_timeout":
val, err := parseDuration(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_query_timeout = %v, want int64 or float64", err)
}
qre.qe.queryTimeout.Set(val)
case "vt_idle_timeout":
val, err := parseDuration(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_idle_timeout = %v, want int64 or float64", err)
}
qre.qe.connPool.SetIdleTimeout(val)
qre.qe.streamConnPool.SetIdleTimeout(val)
qre.qe.txPool.pool.SetIdleTimeout(val)
case "vt_spot_check_ratio":
val, err := parseFloat64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_spot_check_ratio = %v, want float64", err)
}
qre.qe.spotCheckFreq.Set(int64(val * spotCheckMultiplier))
case "vt_strict_mode":
val, err := parseInt64(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_strict_mode = %v, want to int64", err)
}
qre.qe.strictMode.Set(val)
case "vt_txpool_timeout":
val, err := parseDuration(qre.plan.SetValue)
if err != nil {
return nil, NewTabletError(ErrFail, "got set vt_txpool_timeout = %v, want int64 or float64", err)
}
qre.qe.txPool.SetPoolTimeout(val)
default:
conn, err := qre.getConn(qre.qe.connPool)
if err != nil {
return nil, err
}
defer conn.Recycle()
return qre.directFetch(conn, qre.plan.FullQuery, qre.bindVars, nil)
}
return &mproto.QueryResult{}, nil
}
func parseInt64(v interface{}) (int64, error) {
if ival, ok := v.(int64); ok {
return ival, nil
}
return -1, NewTabletError(ErrFail, "got %v, want int64", v)
}
func parseFloat64(v interface{}) (float64, error) {
if ival, ok := v.(int64); ok {
return float64(ival), nil
}
if fval, ok := v.(float64); ok {
return fval, nil
}
return -1, NewTabletError(ErrFail, "got %v, want int64 or float64", v)
}
func parseDuration(v interface{}) (time.Duration, error) {
val, err := parseFloat64(v)
if err != nil {
return 0, err
}
	// time.Duration is an int64 count of nanoseconds, so multiply by 1e9
	// because val might be in the range (0, 1)
return time.Duration(val * 1e9), nil
}
func rowsAreEqual(row1, row2 []sqltypes.Value) bool {
if len(row1) != len(row2) {
return false
}
for i := 0; i < len(row1); i++ {
if row1[i].IsNull() && row2[i].IsNull() {
continue
}
if (row1[i].IsNull() && !row2[i].IsNull()) || (!row1[i].IsNull() && row2[i].IsNull()) || row1[i].String() != row2[i].String() {
return false
}
}
return true
}
func (qre *QueryExecutor) getConn(pool *ConnPool) (*DBConn, error) {
start := time.Now()
conn, err := pool.Get(qre.ctx)
switch err {
case nil:
qre.logStats.WaitingForConnection += time.Now().Sub(start)
return conn, nil
case ErrConnPoolClosed:
return nil, err
}
return nil, NewTabletErrorSql(ErrFatal, err)
}
func (qre *QueryExecutor) qFetch(logStats *SQLQueryStats, parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}) (*mproto.QueryResult, error) {
sql, err := qre.generateFinalSQL(parsedQuery, bindVars, nil)
if err != nil {
return nil, err
}
q, ok := qre.qe.consolidator.Create(string(sql))
if ok {
defer q.Broadcast()
waitingForConnectionStart := time.Now()
conn, err := qre.qe.connPool.Get(qre.ctx)
logStats.WaitingForConnection += time.Now().Sub(waitingForConnectionStart)
if err != nil {
q.Err = NewTabletErrorSql(ErrFatal, err)
} else {
defer conn.Recycle()
q.Result, q.Err = qre.execSQL(conn, sql, false)
}
} else {
logStats.QuerySources |= QuerySourceConsolidator
startTime := time.Now()
q.Wait()
qre.qe.queryServiceStats.WaitStats.Record("Consolidations", startTime)
}
if q.Err != nil {
return nil, q.Err
}
return q.Result.(*mproto.QueryResult), nil
}
func (qre *QueryExecutor) directFetch(conn poolConn, parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}, buildStreamComment []byte) (*mproto.QueryResult, error) {
sql, err := qre.generateFinalSQL(parsedQuery, bindVars, buildStreamComment)
if err != nil {
return nil, err
}
return qre.execSQL(conn, sql, false)
}
// fullFetch also fetches field info
func (qre *QueryExecutor) fullFetch(conn poolConn, parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}, buildStreamComment []byte) (*mproto.QueryResult, error) {
sql, err := qre.generateFinalSQL(parsedQuery, bindVars, buildStreamComment)
if err != nil {
return nil, err
}
return qre.execSQL(conn, sql, true)
}
func (qre *QueryExecutor) fullStreamFetch(conn *DBConn, parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}, buildStreamComment []byte, callback func(*mproto.QueryResult) error) error {
sql, err := qre.generateFinalSQL(parsedQuery, bindVars, buildStreamComment)
if err != nil {
return err
}
return qre.execStreamSQL(conn, sql, callback)
}
func (qre *QueryExecutor) generateFinalSQL(parsedQuery *sqlparser.ParsedQuery, bindVars map[string]interface{}, buildStreamComment []byte) (string, error) {
bindVars["#maxLimit"] = qre.qe.maxResultSize.Get() + 1
sql, err := parsedQuery.GenerateQuery(bindVars)
if err != nil {
return "", NewTabletError(ErrFail, "%s", err)
}
if buildStreamComment != nil {
sql = append(sql, buildStreamComment...)
}
// undo hack done by stripTrailing
sql = restoreTrailing(sql, bindVars)
return hack.String(sql), nil
}
func (qre *QueryExecutor) execSQL(conn poolConn, sql string, wantfields bool) (*mproto.QueryResult, error) {
defer qre.logStats.AddRewrittenSql(sql, time.Now())
return conn.Exec(qre.ctx, sql, int(qre.qe.maxResultSize.Get()), wantfields)
}
func (qre *QueryExecutor) execStreamSQL(conn *DBConn, sql string, callback func(*mproto.QueryResult) error) error {
start := time.Now()
err := conn.Stream(qre.ctx, sql, callback, int(qre.qe.streamBufferSize.Get()))
qre.logStats.AddRewrittenSql(sql, start)
if err != nil {
return NewTabletErrorSql(ErrFail, err)
}
return nil
} | |
_flavours.py | from __future__ import annotations
from pathlib import _PosixFlavour, _WindowsFlavour
from typing import Optional, Callable, Awaitable, Dict, List, TYPE_CHECKING
from errno import EINVAL
import os
import sys
from aiopath.wrap import func_to_async_func as wrap_async
try:
from pathlib import _getfinalpathname
_async_getfinalpathname = wrap_async(_getfinalpathname)
except ImportError:
def _getfinalpathname(*args, **kwargs):
raise ImportError("_getfinalpathname() requires a Windows/NT platform")
async def _async_getfinalpathname(*args, **kwargs):
raise ImportError("_getfinalpathname() requires a Windows/NT platform")
if TYPE_CHECKING: # keep mypy quiet
from ._base import AsyncSyncPath, _AsyncSyncAccessor
class _AsyncSyncPosixFlavour(_PosixFlavour):
def gethomedir(self, username: str) -> str:
return super().gethomedir(username)
async def async_gethomedir(self, username: str) -> str:
gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
return await gethomedir(username)
def resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
sep: str = self.sep
accessor: '_AsyncSyncAccessor' = path._accessor
seen: Dict[str, Optional[str]] = {}
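    # `seen` maps each visited path to its resolved form; a value of None marks
    # a symlink whose resolution is still in progress, which is how symlink
    # loops are detected below.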
def _resolve(path: str, rest: str) -> str:
if rest.startswith(sep): path = ''
for name in rest.split(sep):
if not name or name == '.': continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + name if path.endswith(sep) else path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None: continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError(f"Symlink loop from {newpath}")
# Resolve the symbolic link
try: target = accessor.readlink(newpath)
except OSError as e:
if e.errno != EINVAL and strict: raise
# Not a symlink, or non-strict mode. We just leave the path
# untouched.
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
result = _resolve(base, str(path))
return result or sep
async def async_resolve(self, path: AsyncSyncPath, strict: bool = False) -> Optional[str]:
sep: str = self.sep
accessor: '_AsyncSyncAccessor' = path._accessor
seen: Dict[str, Optional[str]] = {}
async def _resolve(path: str, rest: str) -> str:
if rest.startswith(sep): path = ''
for name in rest.split(sep):
if not name or name == '.': continue
if name == '..':
# parent dir
path, _, _ = path.rpartition(sep)
continue
newpath = path + name if path.endswith(sep) else path + sep + name
if newpath in seen:
# Already seen this path
path = seen[newpath]
if path is not None: continue
# The symlink is not resolved, so we must have a symlink loop.
raise RuntimeError(f"Symlink loop from {newpath}")
# Resolve the symbolic link
try: target = await accessor.async_readlink(newpath)
except OSError as e:
if e.errno != EINVAL and strict: raise
# Not a symlink, or non-strict mode. We just leave the path
# untouched.
path = newpath
else:
seen[newpath] = None # not resolved symlink
path = await _resolve(path, target)
seen[newpath] = path # resolved symlink
return path
# NOTE: according to POSIX, getcwd() cannot contain path components
# which are symlinks.
base = '' if path.is_absolute() else os.getcwd()
result = await _resolve(base, str(path))
return result or sep
class _AsyncSyncWindowsFlavour(_WindowsFlavour):
def gethomedir(self, username: str) -> str:
return super().gethomedir(username)
async def | (self, username: str) -> str:
gethomedir: Callable[[str], Awaitable[str]] = wrap_async(super().gethomedir)
return await gethomedir(username)
def resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
s = str(path)
if not s: return os.getcwd()
previous_s: Optional[str] = None
if _getfinalpathname is not None:
if strict: return self._ext_to_normal(_getfinalpathname(s))
else:
tail_parts: List[str] = [] # End of the path after the first one not found
while True:
try: s = self._ext_to_normal(_getfinalpathname(s))
except FileNotFoundError:
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s: return path
else: return os.path.join(s, *reversed(tail_parts))
return None
async def async_resolve(self, path: 'AsyncSyncPath', strict: bool = False) -> Optional[str]:
s = str(path)
if not s: return os.getcwd()
previous_s: Optional[str] = None
if _async_getfinalpathname is not None:
if strict: return self._ext_to_normal(await _async_getfinalpathname(s))
else:
tail_parts: List[str] = [] # End of the path after the first one not found
while True:
try: s = self._ext_to_normal(await _async_getfinalpathname(s))
except FileNotFoundError:
previous_s = s
s, tail = os.path.split(s)
tail_parts.append(tail)
if previous_s == s: return path
else: return os.path.join(s, *reversed(tail_parts))
return None
_async_sync_windows_flavour = _AsyncSyncWindowsFlavour()
_async_sync_posix_flavour = _AsyncSyncPosixFlavour()
setup.py
from setuptools import setup
setup(name='compylex',
version='1.1',
description='Code compiler',
url='https://github.com/AshiqAbdulkhader/Compylex',
packages=['compylex'],
install_requires=[],
license='MIT License',
author='Muhammed Ashiq Abdul Khader',
author_email='[email protected]',
zip_safe=False
)
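# Illustrative note (not part of the original file): a minimal setup.py like
# this is usually exercised with "pip install ." or "python setup.py sdist"
# from the repository root.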
auth.guard.ts
/*
Guards are classes that are decorated with the @Injectable() decorator and implement the CanActivate interface. A guard is responsible for determining whether a request
should be handled by a route handler. Guards are executed after every middleware, but before pipes. Unlike middleware, guards have access to the ExecutionContext object,
so they know exactly what is going to be evaluated.
*/
import { CanActivate, ExecutionContext } from '@nestjs/common';
export class AuthGuard implements CanActivate {
  canActivate(context: ExecutionContext) {
    const request = context.switchToHttp().getRequest();
    return request.session.userId;
  }
}
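// Illustrative usage (added sketch, not part of the original file): a guard
// like this is typically bound with the @UseGuards() decorator from
// @nestjs/common, e.g.
//
//   @UseGuards(AuthGuard)
//   @Get('whoami')
//   whoAmI(@Session() session: any) { return session.userId; }
//
// When canActivate returns a falsy value (no session.userId), Nest rejects
// the request with a 403 Forbidden response.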
randomized_relu.py
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from theano.sandbox.rng_mrg import MRG_RandomStreams
@treeano.register_node("randomized_relu")
class RandomizedReLUNode(treeano.NodeImpl):
"""
from "Empirical Evaluation of Rectified Activations in Convolutional
Network"
http://arxiv.org/abs/1505.00853
"""
hyperparameter_names = ("alpha_lower",
"alpha_upper",
"deterministic")
def compute_output(self, network, in_vw):
# gather hyperparameters
deterministic = network.find_hyperparameter(["deterministic"])
l = network.find_hyperparameter(["alpha_lower"],
3)
u = network.find_hyperparameter(["alpha_upper"],
8)
if deterministic:
negative_coefficient = 2.0 / (l + u)
else:
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
alphas = srng.uniform(size=in_vw.symbolic_shape(),
low=l,
high=u)
negative_coefficient = 1.0 / alphas
# return output
network.create_vw(
"default",
variable=treeano.utils.rectify(
in_vw.variable,
negative_coefficient=negative_coefficient),
shape=in_vw.shape,
tags={"output"},
)
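# Sketch of the behaviour above (added comment, not in the original file):
# in training mode every element draws its own alpha ~ U(alpha_lower,
# alpha_upper) and negative inputs are scaled by 1/alpha, i.e.
#   f(x) = x            if x >= 0
#   f(x) = x / alpha    otherwise,
# while in deterministic mode the slope is frozen at 2 / (l + u), one over
# the mean sampled alpha, as recommended in the paper cited above.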
@treeano.register_node("uniform_randomized_relu")
class UniformRandomizedReLUNode(treeano.NodeImpl):
"""
like RandomizedReLUNode, but instead of sampling from 1 / uniform(l, u),
sample from uniform(l, u)
"""
hyperparameter_names = ("alpha_lower",
"alpha_upper",
"deterministic")
def compute_output(self, network, in_vw):
# gather hyperparameters
deterministic = network.find_hyperparameter(["deterministic"])
l = network.find_hyperparameter(["alpha_lower"],
1 / 8.)
u = network.find_hyperparameter(["alpha_upper"],
1 / 3.)
if deterministic:
negative_coefficient = (l + u) / 2.
else:
# TODO save this state so that we can seed the rng
srng = MRG_RandomStreams()
negative_coefficient = srng.uniform(size=in_vw.symbolic_shape(),
low=l,
high=u)
# return output
network.create_vw(
"default",
variable=treeano.utils.rectify(
in_vw.variable,
negative_coefficient=negative_coefficient),
shape=in_vw.shape,
tags={"output"},
)
@treeano.register_node("random_walk_relu")
class RandomWalkReLUNode(treeano.NodeImpl):
    """
leaky ReLU node, where leak alpha changes randomly over time
"""
hyperparameter_names = ("step_size",
"initial_alpha",
"inits")
def compute_output(self, network, in_vw):
# gather hyperparameters
initial_alpha = network.find_hyperparameter(
["initial_alpha"],
0)
alpha = network.create_vw(
"alpha",
is_shared=True,
shape=(in_vw.shape[1],),
tags={"state"},
default_inits=[treeano.inits.ConstantInit(initial_alpha)],
).variable
pattern = ["x"] * in_vw.ndim
pattern[1] = 0
alpha_b = alpha.dimshuffle(*pattern)
# return output
network.create_vw(
"default",
variable=treeano.utils.rectify(in_vw.variable,
negative_coefficient=alpha_b),
shape=in_vw.shape,
tags={"output"},
)
def new_update_deltas(self, network):
alpha_vw = network.get_vw("alpha")
step_size = network.find_hyperparameter(["step_size"])
# NOTE: each MRG_RandomStreams has the same seed, so
# all nodes with the same shape end up with the same alphas
srng = MRG_RandomStreams()
steps = srng.uniform(size=alpha_vw.shape,
low=-step_size,
high=step_size)
# TODO clip value of alpha (to prevent it becoming linear)
        return treeano.UpdateDeltas({alpha_vw.variable: steps})
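        # Added note (not in the original file): the deltas above implement a
        # per-channel random walk, alpha <- alpha + U(-step_size, step_size),
        # so the rectifier's leak drifts slightly at every update step.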
|
package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libxevie(AutotoolsPackage, XorgPackage):
"""Xevie - X Event Interception Extension (XEvIE)."""
homepage = "https://cgit.freedesktop.org/xorg/lib/libXevie"
xorg_mirror_path = "lib/libXevie-1.0.3.tar.gz"
version('1.0.3', sha256='3759bb1f7fdade13ed99bfc05c0717bc42ce3f187e7da4eef80beddf5e461258')
depends_on('libx11')
depends_on('libxext')
depends_on('xproto')
depends_on('xextproto')
depends_on('evieext')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
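    # Illustrative note (not part of the original recipe): once this file is
    # on Spack's repo path, the library would be built with
    # "spack install libxevie".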
a.py
from bs4 import BeautifulSoup
from datetime import datetime
import requests
import time
def get_code(company_code):
url="https://finance.naver.com/item/main.nhn?code=" + company_code
result = requests.get(url)
bs_obj = BeautifulSoup(result.content, "html.parser")
return bs_obj
def get_price(company_code):
    bs_obj = get_code(company_code)
    no_today = bs_obj.find("p", {"class": "no_today"})
    blind = no_today.find("span", {"class": 'blind'})
    now_price = blind.text
    return now_price

company_codes = ["175250","153490"]
prices = []
while True:
    now = datetime.now()
    print(now)
    for item in company_codes:
        now_price = get_price(item)
        # print(now_price, company_codes)
        prices.append(now_price)
    # print("------------------------")
    print(prices)
    prices = []
    time.sleep(60)
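# Added note (not in the original file): get_price assumes Naver Finance's
# markup, i.e. a <p class="no_today"> wrapping a <span class="blind"> that
# holds the quote; if that markup changes, no_today becomes None and the
# subsequent .find() call raises AttributeError.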
ovf_test.go
/*
Copyright (c) 2015 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ovf
import (
"bytes"
"fmt"
"os"
"testing"
"text/tabwriter"
)
func testEnvelope(t *testing.T, fn string) *Envelope {
f, err := os.Open(fn)
if err != nil {
t.Fatalf("error opening %s %s", fn, err)
}
defer f.Close()
e, err := Unmarshal(f)
if err != nil {
t.Fatalf("error unmarshaling test file %s", err)
}
if e == nil {
t.Fatal("empty envelope")
}
return e
}
func TestUnmarshal(t *testing.T) {
	e := testEnvelope(t, "fixtures/ttylinux.ovf")
	hw := e.VirtualSystem.VirtualHardware[0]
	if n := len(hw.Config); n != 3 {
		t.Errorf("Config=%d", n)
	}
	if n := len(hw.ExtraConfig); n != 2 {
		t.Errorf("ExtraConfig=%d", n)
	}
	for i, c := range append(hw.Config, hw.ExtraConfig...) {
		if *c.Required {
			t.Errorf("%d: Required=%t", i, *c.Required)
		}
		if c.Key == "" {
			t.Errorf("%d: key=''", i)
		}
		if c.Value == "" {
			t.Errorf("%d: value=''", i)
		}
	}
}
func TestDeploymentOptions(t *testing.T) {
fn := os.Getenv("OVF_TEST_FILE")
if fn == "" {
t.Skip("OVF_TEST_FILE not specified")
}
e := testEnvelope(t, fn)
if e.DeploymentOption == nil {
t.Fatal("DeploymentOptionSection empty")
}
var b bytes.Buffer
tw := tabwriter.NewWriter(&b, 2, 0, 2, ' ', 0)
fmt.Fprintf(tw, "\n")
for _, c := range e.DeploymentOption.Configuration {
fmt.Fprintf(tw, "id=%s\t", c.ID)
fmt.Fprintf(tw, "label=%s\t", c.Label)
d := false
if c.Default != nil {
d = *c.Default
}
fmt.Fprintf(tw, "default=%t\t", d)
fmt.Fprintf(tw, "\n")
}
tw.Flush()
t.Log(b.String())
}
api.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package elasticache provides a client for Amazon ElastiCache.
package elasticache
import (
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/query"
)
const opAddTagsToResource = "AddTagsToResource"
// AddTagsToResourceRequest generates a "aws/request.Request" representing the
// client's request for the AddTagsToResource operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See AddTagsToResource for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the AddTagsToResource method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the AddTagsToResourceRequest method.
// req, resp := client.AddTagsToResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AddTagsToResource
func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *TagListMessage) {
op := &request.Operation{
Name: opAddTagsToResource,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &AddTagsToResourceInput{}
}
output = &TagListMessage{}
req = c.newRequest(op, input, output)
return
}
// AddTagsToResource API operation for Amazon ElastiCache.
//
// Adds up to 50 cost allocation tags to the named resource. A cost allocation
// tag is a key-value pair where the key and value are case-sensitive. You can
// use cost allocation tags to categorize and track your AWS costs.
//
// When you apply tags to your ElastiCache resources, AWS generates a cost allocation
// report as a comma-separated value (CSV) file with your usage and costs aggregated
// by your tags. You can apply tags that represent business categories (such
// as cost centers, application names, or owners) to organize your costs across
// multiple services. For more information, see Using Cost Allocation Tags in
// Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Tagging.html)
// in the ElastiCache User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation AddTagsToResource for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault"
// The requested snapshot name does not refer to an existing snapshot.
//
// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded"
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 50.
//
// * ErrCodeInvalidARNFault "InvalidARN"
// The requested Amazon Resource Name (ARN) does not refer to an existing resource.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AddTagsToResource
func (c *ElastiCache) AddTagsToResource(input *AddTagsToResourceInput) (*TagListMessage, error) {
req, out := c.AddTagsToResourceRequest(input)
return out, req.Send()
}
// AddTagsToResourceWithContext is the same as AddTagsToResource with the addition of
// the ability to pass a context and additional request options.
//
// See AddTagsToResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) AddTagsToResourceWithContext(ctx aws.Context, input *AddTagsToResourceInput, opts ...request.Option) (*TagListMessage, error) {
req, out := c.AddTagsToResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
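// Illustrative call sketch (added, not part of the generated file); assumes
// a session from github.com/aws/aws-sdk-go/aws/session and a hypothetical
// cluster ARN:
//
//	svc := elasticache.New(session.Must(session.NewSession()))
//	_, err := svc.AddTagsToResource(&elasticache.AddTagsToResourceInput{
//		ResourceName: aws.String("arn:aws:elasticache:us-east-1:123456789012:cluster:my-cluster"),
//		Tags:         []*elasticache.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
//	})
//	if err != nil {
//		// inspect err with awserr.Error to match the codes listed above
//	}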
const opAuthorizeCacheSecurityGroupIngress = "AuthorizeCacheSecurityGroupIngress"
// AuthorizeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the
// client's request for the AuthorizeCacheSecurityGroupIngress operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See AuthorizeCacheSecurityGroupIngress for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the AuthorizeCacheSecurityGroupIngress method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the AuthorizeCacheSecurityGroupIngressRequest method.
// req, resp := client.AuthorizeCacheSecurityGroupIngressRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AuthorizeCacheSecurityGroupIngress
func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressRequest(input *AuthorizeCacheSecurityGroupIngressInput) (req *request.Request, output *AuthorizeCacheSecurityGroupIngressOutput) {
op := &request.Operation{
Name: opAuthorizeCacheSecurityGroupIngress,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &AuthorizeCacheSecurityGroupIngressInput{}
}
output = &AuthorizeCacheSecurityGroupIngressOutput{}
req = c.newRequest(op, input, output)
return
}
// AuthorizeCacheSecurityGroupIngress API operation for Amazon ElastiCache.
//
// Allows network ingress to a cache security group. Applications using ElastiCache
// must be running on Amazon EC2, and Amazon EC2 security groups are used as
// the authorization mechanism.
//
// You cannot authorize ingress from an Amazon EC2 security group in one region
// to an ElastiCache cluster in another region.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation AuthorizeCacheSecurityGroupIngress for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState"
// The current state of the cache security group does not allow deletion.
//
// * ErrCodeAuthorizationAlreadyExistsFault "AuthorizationAlreadyExists"
// The specified Amazon EC2 security group is already authorized for the specified
// cache security group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AuthorizeCacheSecurityGroupIngress
func (c *ElastiCache) AuthorizeCacheSecurityGroupIngress(input *AuthorizeCacheSecurityGroupIngressInput) (*AuthorizeCacheSecurityGroupIngressOutput, error) {
req, out := c.AuthorizeCacheSecurityGroupIngressRequest(input)
return out, req.Send()
}
// AuthorizeCacheSecurityGroupIngressWithContext is the same as AuthorizeCacheSecurityGroupIngress with the addition of
// the ability to pass a context and additional request options.
//
// See AuthorizeCacheSecurityGroupIngress for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressWithContext(ctx aws.Context, input *AuthorizeCacheSecurityGroupIngressInput, opts ...request.Option) (*AuthorizeCacheSecurityGroupIngressOutput, error) {
req, out := c.AuthorizeCacheSecurityGroupIngressRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCopySnapshot = "CopySnapshot"
// CopySnapshotRequest generates a "aws/request.Request" representing the
// client's request for the CopySnapshot operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CopySnapshot for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CopySnapshot method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CopySnapshotRequest method.
// req, resp := client.CopySnapshotRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CopySnapshot
func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) {
op := &request.Operation{
Name: opCopySnapshot,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CopySnapshotInput{}
}
output = &CopySnapshotOutput{}
req = c.newRequest(op, input, output)
return
}
// CopySnapshot API operation for Amazon ElastiCache.
//
// Makes a copy of an existing snapshot.
//
// This operation is valid for Redis only.
//
// Users or groups that have permissions to use the CopySnapshot operation can
// create their own Amazon S3 buckets and copy snapshots to it. To control access
// to your snapshots, use an IAM policy to control who has the ability to use
// the CopySnapshot operation. For more information about using IAM to control
// the use of ElastiCache operations, see Exporting Snapshots (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html)
// and Authentication & Access Control (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/IAM.html).
//
// You could receive the following error messages.
//
// Error Messages
//
// * Error Message: The S3 bucket %s is outside of the region.
//
// Solution: Create an Amazon S3 bucket in the same region as your snapshot.
// For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket)
// in the ElastiCache User Guide.
//
// * Error Message: The S3 bucket %s does not exist.
//
// Solution: Create an Amazon S3 bucket in the same region as your snapshot.
// For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket)
// in the ElastiCache User Guide.
//
// * Error Message: The S3 bucket %s is not owned by the authenticated user.
//
// Solution: Create an Amazon S3 bucket in the same region as your snapshot.
// For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket)
// in the ElastiCache User Guide.
//
// * Error Message: The authenticated user does not have sufficient permissions
// to perform the desired activity.
//
// Solution: Contact your system administrator to get the needed permissions.
//
// * Error Message: The S3 bucket %s already contains an object with key
// %s.
//
// Solution: Give the TargetSnapshotName a new and unique value. If exporting
// a snapshot, you could alternatively create a new Amazon S3 bucket and
// use this same value for TargetSnapshotName.
//
// * Error Message: ElastiCache has not been granted READ permissions %s
// on the S3 Bucket.
//
// Solution: Add List and Read permissions on the bucket. For more information,
// see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess)
// in the ElastiCache User Guide.
//
// * Error Message: ElastiCache has not been granted WRITE permissions %s
// on the S3 Bucket.
//
// Solution: Add Upload/Delete permissions on the bucket. For more information,
// see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess)
// in the ElastiCache User Guide.
//
// * Error Message: ElastiCache has not been granted READ_ACP permissions
// %s on the S3 Bucket.
//
// Solution: Add View Permissions on the bucket. For more information, see Step
// 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess)
// in the ElastiCache User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CopySnapshot for usage and error information.
//
// Returned Error Codes:
// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault"
// You already have a snapshot with the given name.
//
// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault"
// The requested snapshot name does not refer to an existing snapshot.
//
// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault"
// The request cannot be processed because it would exceed the maximum number
// of snapshots.
//
// * ErrCodeInvalidSnapshotStateFault "InvalidSnapshotState"
// The current state of the snapshot does not allow the requested operation
// to occur.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CopySnapshot
func (c *ElastiCache) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) {
req, out := c.CopySnapshotRequest(input)
return out, req.Send()
}
// CopySnapshotWithContext is the same as CopySnapshot with the addition of
// the ability to pass a context and additional request options.
//
// See CopySnapshot for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CopySnapshotWithContext(ctx aws.Context, input *CopySnapshotInput, opts ...request.Option) (*CopySnapshotOutput, error) {
req, out := c.CopySnapshotRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateCacheCluster = "CreateCacheCluster"
// CreateCacheClusterRequest generates a "aws/request.Request" representing the
// client's request for the CreateCacheCluster operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateCacheCluster for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateCacheCluster method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CreateCacheClusterRequest method.
// req, resp := client.CreateCacheClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheCluster
func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput) (req *request.Request, output *CreateCacheClusterOutput) {
op := &request.Operation{
Name: opCreateCacheCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateCacheClusterInput{}
}
output = &CreateCacheClusterOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateCacheCluster API operation for Amazon ElastiCache.
//
// Creates a cache cluster. All nodes in the cache cluster run the same protocol-compliant
// cache engine software, either Memcached or Redis.
//
// Due to current limitations on Redis (cluster mode disabled), this operation
// or parameter is not supported on Redis (cluster mode enabled) replication
// groups.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CreateCacheCluster for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
// The requested replication group is not in the available state.
//
// * ErrCodeCacheClusterAlreadyExistsFault "CacheClusterAlreadyExists"
// You already have a cache cluster with the given identifier.
//
// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity"
// The requested cache node type is not available in the specified Availability
// Zone.
//
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault"
// The requested cache subnet group name does not refer to an existing cache
// subnet group.
//
// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache clusters per customer.
//
// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes in a single cache cluster.
//
// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes per customer.
//
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
// The VPC network is in an invalid state.
//
// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded"
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 50.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheCluster
func (c *ElastiCache) CreateCacheCluster(input *CreateCacheClusterInput) (*CreateCacheClusterOutput, error) {
req, out := c.CreateCacheClusterRequest(input)
return out, req.Send()
}
// CreateCacheClusterWithContext is the same as CreateCacheCluster with the addition of
// the ability to pass a context and additional request options.
//
// See CreateCacheCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CreateCacheClusterWithContext(ctx aws.Context, input *CreateCacheClusterInput, opts ...request.Option) (*CreateCacheClusterOutput, error) {
req, out := c.CreateCacheClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateCacheParameterGroup = "CreateCacheParameterGroup"
// CreateCacheParameterGroupRequest generates a "aws/request.Request" representing the
// client's request for the CreateCacheParameterGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateCacheParameterGroup for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateCacheParameterGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CreateCacheParameterGroupRequest method.
// req, resp := client.CreateCacheParameterGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheParameterGroup
func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParameterGroupInput) (req *request.Request, output *CreateCacheParameterGroupOutput) {
op := &request.Operation{
Name: opCreateCacheParameterGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateCacheParameterGroupInput{}
}
output = &CreateCacheParameterGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateCacheParameterGroup API operation for Amazon ElastiCache.
//
// Creates a new Amazon ElastiCache cache parameter group. An ElastiCache cache
// parameter group is a collection of parameters and their values that are applied
// to all of the nodes in any cache cluster or replication group using the CacheParameterGroup.
//
// A newly created CacheParameterGroup is an exact duplicate of the default
// parameter group for the CacheParameterGroupFamily. To customize the newly
// created CacheParameterGroup you can change the values of specific parameters.
// For more information, see:
//
// * ModifyCacheParameterGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheParameterGroup.html)
// in the ElastiCache API Reference.
//
// * Parameters and Parameter Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/ParameterGroups.html)
// in the ElastiCache User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CreateCacheParameterGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheParameterGroupQuotaExceededFault "CacheParameterGroupQuotaExceeded"
// The request cannot be processed because it would exceed the maximum number
// of cache security groups.
//
// * ErrCodeCacheParameterGroupAlreadyExistsFault "CacheParameterGroupAlreadyExists"
// A cache parameter group with the requested name already exists.
//
// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState"
// The current state of the cache parameter group does not allow the requested
// operation to occur.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheParameterGroup
func (c *ElastiCache) CreateCacheParameterGroup(input *CreateCacheParameterGroupInput) (*CreateCacheParameterGroupOutput, error) {
req, out := c.CreateCacheParameterGroupRequest(input)
return out, req.Send()
}
// CreateCacheParameterGroupWithContext is the same as CreateCacheParameterGroup with the addition of
// the ability to pass a context and additional request options.
//
// See CreateCacheParameterGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CreateCacheParameterGroupWithContext(ctx aws.Context, input *CreateCacheParameterGroupInput, opts ...request.Option) (*CreateCacheParameterGroupOutput, error) {
req, out := c.CreateCacheParameterGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateCacheSecurityGroup = "CreateCacheSecurityGroup"
// CreateCacheSecurityGroupRequest generates a "aws/request.Request" representing the
// client's request for the CreateCacheSecurityGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateCacheSecurityGroup for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateCacheSecurityGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CreateCacheSecurityGroupRequest method.
// req, resp := client.CreateCacheSecurityGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSecurityGroup
func (c *ElastiCache) CreateCacheSecurityGroupRequest(input *CreateCacheSecurityGroupInput) (req *request.Request, output *CreateCacheSecurityGroupOutput) {
op := &request.Operation{
Name: opCreateCacheSecurityGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateCacheSecurityGroupInput{}
}
output = &CreateCacheSecurityGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateCacheSecurityGroup API operation for Amazon ElastiCache.
//
// Creates a new cache security group. Use a cache security group to control
// access to one or more cache clusters.
//
// Cache security groups are only used when you are creating a cache cluster
// outside of an Amazon Virtual Private Cloud (Amazon VPC). If you are creating
// a cache cluster inside of a VPC, use a cache subnet group instead. For more
// information, see CreateCacheSubnetGroup (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_CreateCacheSubnetGroup.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CreateCacheSecurityGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSecurityGroupAlreadyExistsFault "CacheSecurityGroupAlreadyExists"
// A cache security group with the specified name already exists.
//
// * ErrCodeCacheSecurityGroupQuotaExceededFault "QuotaExceeded.CacheSecurityGroup"
// The request cannot be processed because it would exceed the allowed number
// of cache security groups.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSecurityGroup
func (c *ElastiCache) CreateCacheSecurityGroup(input *CreateCacheSecurityGroupInput) (*CreateCacheSecurityGroupOutput, error) {
req, out := c.CreateCacheSecurityGroupRequest(input)
return out, req.Send()
}
// CreateCacheSecurityGroupWithContext is the same as CreateCacheSecurityGroup with the addition of
// the ability to pass a context and additional request options.
//
// See CreateCacheSecurityGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CreateCacheSecurityGroupWithContext(ctx aws.Context, input *CreateCacheSecurityGroupInput, opts ...request.Option) (*CreateCacheSecurityGroupOutput, error) {
req, out := c.CreateCacheSecurityGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateCacheSubnetGroup = "CreateCacheSubnetGroup"
// CreateCacheSubnetGroupRequest generates a "aws/request.Request" representing the
// client's request for the CreateCacheSubnetGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateCacheSubnetGroup for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateCacheSubnetGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CreateCacheSubnetGroupRequest method.
// req, resp := client.CreateCacheSubnetGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSubnetGroup
func (c *ElastiCache) CreateCacheSubnetGroupRequest(input *CreateCacheSubnetGroupInput) (req *request.Request, output *CreateCacheSubnetGroupOutput) {
op := &request.Operation{
Name: opCreateCacheSubnetGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateCacheSubnetGroupInput{}
}
output = &CreateCacheSubnetGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateCacheSubnetGroup API operation for Amazon ElastiCache.
//
// Creates a new cache subnet group.
//
// Use this parameter only when you are creating a cluster in an Amazon Virtual
// Private Cloud (Amazon VPC).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CreateCacheSubnetGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSubnetGroupAlreadyExistsFault "CacheSubnetGroupAlreadyExists"
// The requested cache subnet group name is already in use by an existing cache
// subnet group.
//
// * ErrCodeCacheSubnetGroupQuotaExceededFault "CacheSubnetGroupQuotaExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache subnet groups.
//
// * ErrCodeCacheSubnetQuotaExceededFault "CacheSubnetQuotaExceededFault"
// The request cannot be processed because it would exceed the allowed number
// of subnets in a cache subnet group.
//
// * ErrCodeInvalidSubnet "InvalidSubnet"
// An invalid subnet identifier was specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSubnetGroup
func (c *ElastiCache) CreateCacheSubnetGroup(input *CreateCacheSubnetGroupInput) (*CreateCacheSubnetGroupOutput, error) {
req, out := c.CreateCacheSubnetGroupRequest(input)
return out, req.Send()
}
// CreateCacheSubnetGroupWithContext is the same as CreateCacheSubnetGroup with the addition of
// the ability to pass a context and additional request options.
//
// See CreateCacheSubnetGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CreateCacheSubnetGroupWithContext(ctx aws.Context, input *CreateCacheSubnetGroupInput, opts ...request.Option) (*CreateCacheSubnetGroupOutput, error) {
req, out := c.CreateCacheSubnetGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateReplicationGroup = "CreateReplicationGroup"
// CreateReplicationGroupRequest generates a "aws/request.Request" representing the
// client's request for the CreateReplicationGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateReplicationGroup for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateReplicationGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CreateReplicationGroupRequest method.
// req, resp := client.CreateReplicationGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateReplicationGroup
func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGroupInput) (req *request.Request, output *CreateReplicationGroupOutput) {
op := &request.Operation{
Name: opCreateReplicationGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateReplicationGroupInput{}
}
output = &CreateReplicationGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateReplicationGroup API operation for Amazon ElastiCache.
//
// Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled)
// replication group.
//
// A Redis (cluster mode disabled) replication group is a collection of cache
// clusters, where one of the cache clusters is a read/write primary and the
// others are read-only replicas. Writes to the primary are asynchronously propagated
// to the replicas.
//
// A Redis (cluster mode enabled) replication group is a collection of 1 to
// 15 node groups (shards). Each node group (shard) has one read/write primary
// node and up to 5 read-only replica nodes. Writes to the primary are asynchronously
// propagated to the replicas. Redis (cluster mode enabled) replication groups
// partition the data across node groups (shards).
//
// When a Redis (cluster mode disabled) replication group has been successfully
// created, you can add one or more read replicas to it, up to a total of 5
// read replicas. You cannot alter a Redis (cluster mode enabled) replication
// group after it has been created. However, if you need to increase or decrease
// the number of node groups (console: shards), you can avail yourself of ElastiCache
// for Redis' enhanced backup and restore. For more information, see Restoring
// From a Backup with Cluster Resizing (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/backups-restoring.html)
// in the ElastiCache User Guide.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CreateReplicationGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeReplicationGroupAlreadyExistsFault "ReplicationGroupAlreadyExists"
// The specified replication group already exists.
//
// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity"
// The requested cache node type is not available in the specified Availability
// Zone.
//
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault"
// The requested cache subnet group name does not refer to an existing cache
// subnet group.
//
// * ErrCodeClusterQuotaForCustomerExceededFault "ClusterQuotaForCustomerExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache clusters per customer.
//
// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes in a single cache cluster.
//
// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes per customer.
//
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
// The VPC network is in an invalid state.
//
// * ErrCodeTagQuotaPerResourceExceeded "TagQuotaPerResourceExceeded"
// The request cannot be processed because it would cause the resource to have
// more than the allowed number of tags. The maximum number of tags permitted
// on a resource is 50.
//
// * ErrCodeNodeGroupsPerReplicationGroupQuotaExceededFault "NodeGroupsPerReplicationGroupQuotaExceeded"
// The request cannot be processed because it would exceed the maximum of 15
// node groups (shards) in a single replication group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateReplicationGroup
func (c *ElastiCache) CreateReplicationGroup(input *CreateReplicationGroupInput) (*CreateReplicationGroupOutput, error) {
req, out := c.CreateReplicationGroupRequest(input)
return out, req.Send()
}
// CreateReplicationGroupWithContext is the same as CreateReplicationGroup with the addition of
// the ability to pass a context and additional request options.
//
// See CreateReplicationGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CreateReplicationGroupWithContext(ctx aws.Context, input *CreateReplicationGroupInput, opts ...request.Option) (*CreateReplicationGroupOutput, error) {
req, out := c.CreateReplicationGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opCreateSnapshot = "CreateSnapshot"
// CreateSnapshotRequest generates a "aws/request.Request" representing the
// client's request for the CreateSnapshot operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See CreateSnapshot for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the CreateSnapshot method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the CreateSnapshotRequest method.
// req, resp := client.CreateSnapshotRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateSnapshot
func (c *ElastiCache) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) {
op := &request.Operation{
Name: opCreateSnapshot,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &CreateSnapshotInput{}
}
output = &CreateSnapshotOutput{}
req = c.newRequest(op, input, output)
return
}
// CreateSnapshot API operation for Amazon ElastiCache.
//
// Creates a copy of an entire cache cluster or replication group at a specific
// moment in time.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation CreateSnapshot for usage and error information.
//
// Returned Error Codes:
// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault"
// You already have a snapshot with the given name.
//
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
// The requested replication group is not in the available state.
//
// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault"
// The request cannot be processed because it would exceed the maximum number
// of snapshots.
//
// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault"
// You attempted one of the following operations:
//
// * Creating a snapshot of a Redis cache cluster running on a cache.t1.micro
// cache node.
//
// * Creating a snapshot of a cache cluster that is running Memcached rather
// than Redis.
//
// Neither of these are supported by ElastiCache.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateSnapshot
func (c *ElastiCache) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapshotOutput, error) {
req, out := c.CreateSnapshotRequest(input)
return out, req.Send()
}
// CreateSnapshotWithContext is the same as CreateSnapshot with the addition of
// the ability to pass a context and additional request options.
//
// See CreateSnapshot for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) CreateSnapshotWithContext(ctx aws.Context, input *CreateSnapshotInput, opts ...request.Option) (*CreateSnapshotOutput, error) {
req, out := c.CreateSnapshotRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opDeleteCacheCluster = "DeleteCacheCluster"
// DeleteCacheClusterRequest generates a "aws/request.Request" representing the
// client's request for the DeleteCacheCluster operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteCacheCluster for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteCacheCluster method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DeleteCacheClusterRequest method.
// req, resp := client.DeleteCacheClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheCluster
func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) {
op := &request.Operation{
Name: opDeleteCacheCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteCacheClusterInput{}
}
output = &DeleteCacheClusterOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteCacheCluster API operation for Amazon ElastiCache.
//
// Deletes a previously provisioned cache cluster. DeleteCacheCluster deletes
// all associated cache nodes, node endpoints and the cache cluster itself.
// When you receive a successful response from this operation, Amazon ElastiCache
// immediately begins deleting the cache cluster; you cannot cancel or revert
// this operation.
//
// This operation cannot be used to delete a cache cluster that is the last
// read replica of a replication group or node group (shard) that has Multi-AZ
// mode enabled or a cache cluster from a Redis (cluster mode enabled) replication
// group.
//
// Due to current limitations on Redis (cluster mode disabled), this operation
// or parameter is not supported on Redis (cluster mode enabled) replication
// groups.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DeleteCacheCluster for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault"
// You already have a snapshot with the given name.
//
// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault"
// You attempted one of the following operations:
//
// * Creating a snapshot of a Redis cache cluster running on a cache.t1.micro
// cache node.
//
// * Creating a snapshot of a cache cluster that is running Memcached rather
// than Redis.
//
// Neither of these is supported by ElastiCache.
//
// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault"
// The request cannot be processed because it would exceed the maximum number
// of snapshots.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheCluster
func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*DeleteCacheClusterOutput, error) {
req, out := c.DeleteCacheClusterRequest(input)
return out, req.Send()
}
// DeleteCacheClusterWithContext is the same as DeleteCacheCluster with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteCacheCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DeleteCacheClusterWithContext(ctx aws.Context, input *DeleteCacheClusterInput, opts ...request.Option) (*DeleteCacheClusterOutput, error) {
req, out := c.DeleteCacheClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
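
// A minimal, hedged sketch of distinguishing the error codes documented above
// with awserr (github.com/aws/aws-sdk-go/aws/awserr). "client" is an assumed
// *ElastiCache client and "my-cluster" a hypothetical cluster ID.
//
//    _, err := client.DeleteCacheCluster(&DeleteCacheClusterInput{
//        CacheClusterId: aws.String("my-cluster"), // hypothetical cluster ID
//    })
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case ErrCodeCacheClusterNotFoundFault:
//            // already gone; often safe to treat as success
//        case ErrCodeInvalidCacheClusterStateFault:
//            // not in a deletable state; retry later
//        default:
//            fmt.Println(aerr.Code(), aerr.Message())
//        }
//    }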

const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup"

// DeleteCacheParameterGroupRequest generates a "aws/request.Request" representing the
// client's request for the DeleteCacheParameterGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteCacheParameterGroup for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteCacheParameterGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DeleteCacheParameterGroupRequest method.
// req, resp := client.DeleteCacheParameterGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroup
func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) {
op := &request.Operation{
Name: opDeleteCacheParameterGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteCacheParameterGroupInput{}
}
output = &DeleteCacheParameterGroupOutput{}
req = c.newRequest(op, input, output)
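	// This operation returns no result data, so the default query unmarshal
	// handler is swapped for one that discards the response body.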
req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteCacheParameterGroup API operation for Amazon ElastiCache.
//
// Deletes the specified cache parameter group. You cannot delete a cache parameter
// group if it is associated with any cache clusters.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DeleteCacheParameterGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState"
// The current state of the cache parameter group does not allow the requested
// operation to occur.
//
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroup
func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroupInput) (*DeleteCacheParameterGroupOutput, error) {
req, out := c.DeleteCacheParameterGroupRequest(input)
return out, req.Send()
}
// DeleteCacheParameterGroupWithContext is the same as DeleteCacheParameterGroup with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteCacheParameterGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DeleteCacheParameterGroupWithContext(ctx aws.Context, input *DeleteCacheParameterGroupInput, opts ...request.Option) (*DeleteCacheParameterGroupOutput, error) {
req, out := c.DeleteCacheParameterGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup"

// DeleteCacheSecurityGroupRequest generates a "aws/request.Request" representing the
// client's request for the DeleteCacheSecurityGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteCacheSecurityGroup for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteCacheSecurityGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DeleteCacheSecurityGroupRequest method.
// req, resp := client.DeleteCacheSecurityGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroup
func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) {
op := &request.Operation{
Name: opDeleteCacheSecurityGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteCacheSecurityGroupInput{}
}
output = &DeleteCacheSecurityGroupOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteCacheSecurityGroup API operation for Amazon ElastiCache.
//
// Deletes a cache security group.
//
// You cannot delete a cache security group if it is associated with any cache
// clusters.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DeleteCacheSecurityGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState"
// The current state of the cache security group does not allow deletion.
//
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroup
func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) {
req, out := c.DeleteCacheSecurityGroupRequest(input)
return out, req.Send()
}
// DeleteCacheSecurityGroupWithContext is the same as DeleteCacheSecurityGroup with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteCacheSecurityGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DeleteCacheSecurityGroupWithContext(ctx aws.Context, input *DeleteCacheSecurityGroupInput, opts ...request.Option) (*DeleteCacheSecurityGroupOutput, error) {
req, out := c.DeleteCacheSecurityGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup"

// DeleteCacheSubnetGroupRequest generates a "aws/request.Request" representing the
// client's request for the DeleteCacheSubnetGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteCacheSubnetGroup for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteCacheSubnetGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DeleteCacheSubnetGroupRequest method.
// req, resp := client.DeleteCacheSubnetGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroup
func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) {
op := &request.Operation{
Name: opDeleteCacheSubnetGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteCacheSubnetGroupInput{}
}
output = &DeleteCacheSubnetGroupOutput{}
req = c.newRequest(op, input, output)
req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
return
}
// DeleteCacheSubnetGroup API operation for Amazon ElastiCache.
//
// Deletes a cache subnet group.
//
// You cannot delete a cache subnet group if it is associated with any cache
// clusters.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DeleteCacheSubnetGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSubnetGroupInUse "CacheSubnetGroupInUse"
// The requested cache subnet group is currently in use.
//
// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault"
// The requested cache subnet group name does not refer to an existing cache
// subnet group.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroup
func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) {
req, out := c.DeleteCacheSubnetGroupRequest(input)
return out, req.Send()
}
// DeleteCacheSubnetGroupWithContext is the same as DeleteCacheSubnetGroup with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteCacheSubnetGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DeleteCacheSubnetGroupWithContext(ctx aws.Context, input *DeleteCacheSubnetGroupInput, opts ...request.Option) (*DeleteCacheSubnetGroupOutput, error) {
req, out := c.DeleteCacheSubnetGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opDeleteReplicationGroup = "DeleteReplicationGroup"

// DeleteReplicationGroupRequest generates a "aws/request.Request" representing the
// client's request for the DeleteReplicationGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteReplicationGroup for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteReplicationGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DeleteReplicationGroupRequest method.
// req, resp := client.DeleteReplicationGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroup
func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) {
op := &request.Operation{
Name: opDeleteReplicationGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteReplicationGroupInput{}
}
output = &DeleteReplicationGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteReplicationGroup API operation for Amazon ElastiCache.
//
// Deletes an existing replication group. By default, this operation deletes
// the entire replication group, including the primary/primaries and all of
// the read replicas. If the replication group has only one primary, you can
// optionally delete only the read replicas, while retaining the primary by
// setting RetainPrimaryCluster=true.
//
// When you receive a successful response from this operation, Amazon ElastiCache
// immediately begins deleting the selected resources; you cannot cancel or
// revert this operation.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DeleteReplicationGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
// The requested replication group is not in the available state.
//
// * ErrCodeSnapshotAlreadyExistsFault "SnapshotAlreadyExistsFault"
// You already have a snapshot with the given name.
//
// * ErrCodeSnapshotFeatureNotSupportedFault "SnapshotFeatureNotSupportedFault"
// You attempted one of the following operations:
//
// * Creating a snapshot of a Redis cache cluster running on a cache.t1.micro
// cache node.
//
// * Creating a snapshot of a cache cluster that is running Memcached rather
// than Redis.
//
// Neither of these is supported by ElastiCache.
//
// * ErrCodeSnapshotQuotaExceededFault "SnapshotQuotaExceededFault"
// The request cannot be processed because it would exceed the maximum number
// of snapshots.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroup
func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) (*DeleteReplicationGroupOutput, error) {
req, out := c.DeleteReplicationGroupRequest(input)
return out, req.Send()
}
// DeleteReplicationGroupWithContext is the same as DeleteReplicationGroup with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteReplicationGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DeleteReplicationGroupWithContext(ctx aws.Context, input *DeleteReplicationGroupInput, opts ...request.Option) (*DeleteReplicationGroupOutput, error) {
req, out := c.DeleteReplicationGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
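
// A minimal, hedged sketch of the RetainPrimaryCluster behavior described
// above: delete only the read replicas and keep the primary. "client" is an
// assumed *ElastiCache client; "my-repl-group" is a hypothetical identifier.
//
//    out, err := client.DeleteReplicationGroup(&DeleteReplicationGroupInput{
//        ReplicationGroupId:   aws.String("my-repl-group"), // hypothetical ID
//        RetainPrimaryCluster: aws.Bool(true),              // keep the primary node
//    })
//    if err == nil { // out describes the replication group being deleted
//        fmt.Println(out)
//    }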

const opDeleteSnapshot = "DeleteSnapshot"

// DeleteSnapshotRequest generates a "aws/request.Request" representing the
// client's request for the DeleteSnapshot operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DeleteSnapshot for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DeleteSnapshot method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DeleteSnapshotRequest method.
// req, resp := client.DeleteSnapshotRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshot
func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) {
op := &request.Operation{
Name: opDeleteSnapshot,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DeleteSnapshotInput{}
}
output = &DeleteSnapshotOutput{}
req = c.newRequest(op, input, output)
return
}
// DeleteSnapshot API operation for Amazon ElastiCache.
//
// Deletes an existing snapshot. When you receive a successful response from
// this operation, ElastiCache immediately begins deleting the snapshot; you
// cannot cancel or revert this operation.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DeleteSnapshot for usage and error information.
//
// Returned Error Codes:
// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault"
// The requested snapshot name does not refer to an existing snapshot.
//
// * ErrCodeInvalidSnapshotStateFault "InvalidSnapshotState"
// The current state of the snapshot does not allow the requested operation
// to occur.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshot
func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, error) {
req, out := c.DeleteSnapshotRequest(input)
return out, req.Send()
}
// DeleteSnapshotWithContext is the same as DeleteSnapshot with the addition of
// the ability to pass a context and additional request options.
//
// See DeleteSnapshot for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DeleteSnapshotWithContext(ctx aws.Context, input *DeleteSnapshotInput, opts ...request.Option) (*DeleteSnapshotOutput, error) {
req, out := c.DeleteSnapshotRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
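
// A minimal, hedged sketch of idempotent snapshot cleanup: a
// SnapshotNotFoundFault from DeleteSnapshot is treated as already deleted.
// "client" is an assumed *ElastiCache client; "my-snapshot" is hypothetical.
//
//    _, err := client.DeleteSnapshot(&DeleteSnapshotInput{
//        SnapshotName: aws.String("my-snapshot"), // hypothetical snapshot name
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ErrCodeSnapshotNotFoundFault {
//        err = nil // nothing left to delete; treat as success
//    }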

const opDescribeCacheClusters = "DescribeCacheClusters"

// DescribeCacheClustersRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCacheClusters operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeCacheClusters for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeCacheClusters method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeCacheClustersRequest method.
// req, resp := client.DescribeCacheClustersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClusters
func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) {
op := &request.Operation{
Name: opDescribeCacheClusters,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeCacheClustersInput{}
}
output = &DescribeCacheClustersOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCacheClusters API operation for Amazon ElastiCache.
//
// Returns information about all provisioned cache clusters if no cache cluster
// identifier is specified, or about a specific cache cluster if a cache cluster
// identifier is supplied.
//
// By default, abbreviated information about the cache clusters is returned.
// You can use the optional ShowCacheNodeInfo flag to retrieve detailed information
// about the cache nodes associated with the cache clusters. These details include
// the DNS address and port for the cache node endpoint.
//
// If the cluster is in the creating state, only cluster-level information is
// displayed until all of the nodes are successfully provisioned.
//
// If the cluster is in the deleting state, only cluster-level information is
// displayed.
//
// If cache nodes are currently being added to the cache cluster, node endpoint
// information and creation time for the additional nodes are not displayed
// until they are completely provisioned. When the cache cluster state is available,
// the cluster is ready for use.
//
// If cache nodes are currently being removed from the cache cluster, no endpoint
// information for the removed nodes is displayed.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeCacheClusters for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClusters
func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) (*DescribeCacheClustersOutput, error) {
req, out := c.DescribeCacheClustersRequest(input)
return out, req.Send()
}
// DescribeCacheClustersWithContext is the same as DescribeCacheClusters with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCacheClusters for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheClustersWithContext(ctx aws.Context, input *DescribeCacheClustersInput, opts ...request.Option) (*DescribeCacheClustersOutput, error) {
req, out := c.DescribeCacheClustersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeCacheClustersPages iterates over the pages of a DescribeCacheClusters operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeCacheClusters method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeCacheClusters operation.
// pageNum := 0
// err := client.DescribeCacheClustersPages(params,
// func(page *DescribeCacheClustersOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(*DescribeCacheClustersOutput, bool) bool) error {
return c.DescribeCacheClustersPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeCacheClustersPagesWithContext same as DescribeCacheClustersPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheClustersPagesWithContext(ctx aws.Context, input *DescribeCacheClustersInput, fn func(*DescribeCacheClustersOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeCacheClustersInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeCacheClustersRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeCacheClustersOutput), !p.HasNextPage())
}
return p.Err()
}
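
// A minimal, hedged sketch combining the paginator above with the
// ShowCacheNodeInfo flag so each page carries per-node endpoint details.
// "client" is an assumed *ElastiCache client; the traversal is illustrative.
//
//    input := &DescribeCacheClustersInput{
//        ShowCacheNodeInfo: aws.Bool(true), // include node endpoints in the output
//    }
//    err := client.DescribeCacheClustersPages(input,
//        func(page *DescribeCacheClustersOutput, lastPage bool) bool {
//            for _, cc := range page.CacheClusters {
//                for _, node := range cc.CacheNodes {
//                    fmt.Println(*cc.CacheClusterId, node.Endpoint)
//                }
//            }
//            return true // keep iterating until the last page
//        })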

const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions"

// DescribeCacheEngineVersionsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCacheEngineVersions operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeCacheEngineVersions for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeCacheEngineVersions method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeCacheEngineVersionsRequest method.
// req, resp := client.DescribeCacheEngineVersionsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersions
func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) {
op := &request.Operation{
Name: opDescribeCacheEngineVersions,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeCacheEngineVersionsInput{}
}
output = &DescribeCacheEngineVersionsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCacheEngineVersions API operation for Amazon ElastiCache.
//
// Returns a list of the available cache engines and their versions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeCacheEngineVersions for usage and error information.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersions
func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVersionsInput) (*DescribeCacheEngineVersionsOutput, error) {
req, out := c.DescribeCacheEngineVersionsRequest(input)
return out, req.Send()
}
// DescribeCacheEngineVersionsWithContext is the same as DescribeCacheEngineVersions with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCacheEngineVersions for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheEngineVersionsWithContext(ctx aws.Context, input *DescribeCacheEngineVersionsInput, opts ...request.Option) (*DescribeCacheEngineVersionsOutput, error) {
req, out := c.DescribeCacheEngineVersionsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeCacheEngineVersionsPages iterates over the pages of a DescribeCacheEngineVersions operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeCacheEngineVersions method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeCacheEngineVersions operation.
// pageNum := 0
// err := client.DescribeCacheEngineVersionsPages(params,
// func(page *DescribeCacheEngineVersionsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(*DescribeCacheEngineVersionsOutput, bool) bool) error {
return c.DescribeCacheEngineVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeCacheEngineVersionsPagesWithContext same as DescribeCacheEngineVersionsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheEngineVersionsPagesWithContext(ctx aws.Context, input *DescribeCacheEngineVersionsInput, fn func(*DescribeCacheEngineVersionsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeCacheEngineVersionsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeCacheEngineVersionsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeCacheEngineVersionsOutput), !p.HasNextPage())
}
return p.Err()
}
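
// A minimal, hedged sketch of narrowing DescribeCacheEngineVersions to one
// engine with the Engine filter; "redis" is one valid engine name. "client"
// is an assumed *ElastiCache client.
//
//    err := client.DescribeCacheEngineVersionsPages(
//        &DescribeCacheEngineVersionsInput{Engine: aws.String("redis")},
//        func(page *DescribeCacheEngineVersionsOutput, lastPage bool) bool {
//            for _, v := range page.CacheEngineVersions {
//                fmt.Println(*v.EngineVersion)
//            }
//            return true
//        })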

const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups"

// DescribeCacheParameterGroupsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCacheParameterGroups operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeCacheParameterGroups for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeCacheParameterGroups method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeCacheParameterGroupsRequest method.
// req, resp := client.DescribeCacheParameterGroupsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroups
func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) {
op := &request.Operation{
Name: opDescribeCacheParameterGroups,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeCacheParameterGroupsInput{}
}
output = &DescribeCacheParameterGroupsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCacheParameterGroups API operation for Amazon ElastiCache.
//
// Returns a list of cache parameter group descriptions. If a cache parameter
// group name is specified, the list contains only the descriptions for that
// group.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeCacheParameterGroups for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroups
func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameterGroupsInput) (*DescribeCacheParameterGroupsOutput, error) {
req, out := c.DescribeCacheParameterGroupsRequest(input)
return out, req.Send()
}
// DescribeCacheParameterGroupsWithContext is the same as DescribeCacheParameterGroups with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCacheParameterGroups for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheParameterGroupsWithContext(ctx aws.Context, input *DescribeCacheParameterGroupsInput, opts ...request.Option) (*DescribeCacheParameterGroupsOutput, error) {
req, out := c.DescribeCacheParameterGroupsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeCacheParameterGroupsPages iterates over the pages of a DescribeCacheParameterGroups operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeCacheParameterGroups method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeCacheParameterGroups operation.
// pageNum := 0
// err := client.DescribeCacheParameterGroupsPages(params,
// func(page *DescribeCacheParameterGroupsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(*DescribeCacheParameterGroupsOutput, bool) bool) error {
return c.DescribeCacheParameterGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeCacheParameterGroupsPagesWithContext same as DescribeCacheParameterGroupsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheParameterGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheParameterGroupsInput, fn func(*DescribeCacheParameterGroupsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeCacheParameterGroupsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeCacheParameterGroupsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeCacheParameterGroupsOutput), !p.HasNextPage())
}
return p.Err()
}

const opDescribeCacheParameters = "DescribeCacheParameters"

// DescribeCacheParametersRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCacheParameters operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeCacheParameters for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeCacheParameters method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeCacheParametersRequest method.
// req, resp := client.DescribeCacheParametersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameters
func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) {
op := &request.Operation{
Name: opDescribeCacheParameters,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeCacheParametersInput{}
}
output = &DescribeCacheParametersOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCacheParameters API operation for Amazon ElastiCache.
//
// Returns the detailed parameter list for a particular cache parameter group.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeCacheParameters for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameters
func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInput) (*DescribeCacheParametersOutput, error) {
req, out := c.DescribeCacheParametersRequest(input)
return out, req.Send()
}
// DescribeCacheParametersWithContext is the same as DescribeCacheParameters with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCacheParameters for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheParametersWithContext(ctx aws.Context, input *DescribeCacheParametersInput, opts ...request.Option) (*DescribeCacheParametersOutput, error) {
req, out := c.DescribeCacheParametersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeCacheParametersPages iterates over the pages of a DescribeCacheParameters operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeCacheParameters method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeCacheParameters operation.
// pageNum := 0
// err := client.DescribeCacheParametersPages(params,
// func(page *DescribeCacheParametersOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(*DescribeCacheParametersOutput, bool) bool) error {
return c.DescribeCacheParametersPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeCacheParametersPagesWithContext same as DescribeCacheParametersPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheParametersPagesWithContext(ctx aws.Context, input *DescribeCacheParametersInput, fn func(*DescribeCacheParametersOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeCacheParametersInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeCacheParametersRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeCacheParametersOutput), !p.HasNextPage())
}
return p.Err()
}
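
// A minimal, hedged sketch of listing only the user-modified parameters of a
// group via the Source filter ("user" is one documented value). "client" is
// an assumed *ElastiCache client; "my-params" is a hypothetical group name.
//
//    out, err := client.DescribeCacheParameters(&DescribeCacheParametersInput{
//        CacheParameterGroupName: aws.String("my-params"), // hypothetical group name
//        Source:                  aws.String("user"),      // only user-modified parameters
//    })
//    if err == nil {
//        fmt.Println(out.Parameters)
//    }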

const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups"

// DescribeCacheSecurityGroupsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCacheSecurityGroups operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeCacheSecurityGroups for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeCacheSecurityGroups method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeCacheSecurityGroupsRequest method.
// req, resp := client.DescribeCacheSecurityGroupsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroups
func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) {
op := &request.Operation{
Name: opDescribeCacheSecurityGroups,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeCacheSecurityGroupsInput{}
}
output = &DescribeCacheSecurityGroupsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCacheSecurityGroups API operation for Amazon ElastiCache.
//
// Returns a list of cache security group descriptions. If a cache security
// group name is specified, the list contains only the description of that group.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeCacheSecurityGroups for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroups
func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGroupsInput) (*DescribeCacheSecurityGroupsOutput, error) {
req, out := c.DescribeCacheSecurityGroupsRequest(input)
return out, req.Send()
}
// DescribeCacheSecurityGroupsWithContext is the same as DescribeCacheSecurityGroups with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCacheSecurityGroups for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheSecurityGroupsWithContext(ctx aws.Context, input *DescribeCacheSecurityGroupsInput, opts ...request.Option) (*DescribeCacheSecurityGroupsOutput, error) {
req, out := c.DescribeCacheSecurityGroupsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeCacheSecurityGroupsPages iterates over the pages of a DescribeCacheSecurityGroups operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeCacheSecurityGroups method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeCacheSecurityGroups operation.
// pageNum := 0
// err := client.DescribeCacheSecurityGroupsPages(params,
// func(page *DescribeCacheSecurityGroupsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(*DescribeCacheSecurityGroupsOutput, bool) bool) error {
return c.DescribeCacheSecurityGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeCacheSecurityGroupsPagesWithContext same as DescribeCacheSecurityGroupsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheSecurityGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheSecurityGroupsInput, fn func(*DescribeCacheSecurityGroupsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeCacheSecurityGroupsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeCacheSecurityGroupsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeCacheSecurityGroupsOutput), !p.HasNextPage())
}
return p.Err()
}

const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups"

// DescribeCacheSubnetGroupsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeCacheSubnetGroups operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeCacheSubnetGroups for usage and error information.
//
// Create a request object using this method when you want to inject custom
// logic into the request's lifecycle using a custom handler, or when you want
// to access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeCacheSubnetGroups method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeCacheSubnetGroupsRequest method.
// req, resp := client.DescribeCacheSubnetGroupsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroups
func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) {
op := &request.Operation{
Name: opDescribeCacheSubnetGroups,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeCacheSubnetGroupsInput{}
}
output = &DescribeCacheSubnetGroupsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeCacheSubnetGroups API operation for Amazon ElastiCache.
//
// Returns a list of cache subnet group descriptions. If a subnet group name
// is specified, the list contains only the description of that group.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeCacheSubnetGroups for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault"
// The requested cache subnet group name does not refer to an existing cache
// subnet group.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroups
func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroupsInput) (*DescribeCacheSubnetGroupsOutput, error) {
req, out := c.DescribeCacheSubnetGroupsRequest(input)
return out, req.Send()
}
// DescribeCacheSubnetGroupsWithContext is the same as DescribeCacheSubnetGroups with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeCacheSubnetGroups for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeCacheSubnetGroupsWithContext(ctx aws.Context, input *DescribeCacheSubnetGroupsInput, opts ...request.Option) (*DescribeCacheSubnetGroupsOutput, error) {
req, out := c.DescribeCacheSubnetGroupsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeCacheSubnetGroupsPages iterates over the pages of a DescribeCacheSubnetGroups operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeCacheSubnetGroups method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeCacheSubnetGroups operation.
// pageNum := 0
// err := client.DescribeCacheSubnetGroupsPages(params,
// func(page *DescribeCacheSubnetGroupsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(*DescribeCacheSubnetGroupsOutput, bool) bool) error {
return c.DescribeCacheSubnetGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeCacheSubnetGroupsPagesWithContext same as DescribeCacheSubnetGroupsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
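//
// A minimal sketch of bounding an entire pagination with a deadline, assuming
// a configured service client named client; the 30-second timeout is an
// illustrative value, and a standard context.Context satisfies aws.Context.
//
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    err := client.DescribeCacheSubnetGroupsPagesWithContext(ctx, nil,
//        func(page *DescribeCacheSubnetGroupsOutput, lastPage bool) bool {
//            fmt.Println(len(page.CacheSubnetGroups), "groups on this page")
//            return true // keep paging until the last page or the deadline
//        })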
func (c *ElastiCache) DescribeCacheSubnetGroupsPagesWithContext(ctx aws.Context, input *DescribeCacheSubnetGroupsInput, fn func(*DescribeCacheSubnetGroupsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeCacheSubnetGroupsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeCacheSubnetGroupsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeCacheSubnetGroupsOutput), !p.HasNextPage())
}
return p.Err()
}
const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters"
// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the
// client's request for the DescribeEngineDefaultParameters operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeEngineDefaultParameters for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DescribeEngineDefaultParameters method
// directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeEngineDefaultParametersRequest method.
// req, resp := client.DescribeEngineDefaultParametersRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParameters
func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) {
op := &request.Operation{
Name: opDescribeEngineDefaultParameters,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"EngineDefaults.Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeEngineDefaultParametersInput{}
}
output = &DescribeEngineDefaultParametersOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeEngineDefaultParameters API operation for Amazon ElastiCache.
//
// Returns the default engine and system parameter information for the specified
// cache engine.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeEngineDefaultParameters for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParameters
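//
// A minimal sketch, assuming a configured service client named client; the
// family "redis3.2" is an illustrative value for the required
// CacheParameterGroupFamily field.
//
//    out, err := client.DescribeEngineDefaultParameters(&DescribeEngineDefaultParametersInput{
//        CacheParameterGroupFamily: aws.String("redis3.2"),
//    })
//    if err == nil {
//        for _, p := range out.EngineDefaults.Parameters {
//            fmt.Println(aws.StringValue(p.ParameterName), "=", aws.StringValue(p.ParameterValue))
//        }
//    }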
func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParametersInput) (*DescribeEngineDefaultParametersOutput, error) {
req, out := c.DescribeEngineDefaultParametersRequest(input)
return out, req.Send()
}
// DescribeEngineDefaultParametersWithContext is the same as DescribeEngineDefaultParameters with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeEngineDefaultParameters for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeEngineDefaultParametersWithContext(ctx aws.Context, input *DescribeEngineDefaultParametersInput, opts ...request.Option) (*DescribeEngineDefaultParametersOutput, error) {
req, out := c.DescribeEngineDefaultParametersRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeEngineDefaultParameters method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation.
// pageNum := 0
// err := client.DescribeEngineDefaultParametersPages(params,
// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(*DescribeEngineDefaultParametersOutput, bool) bool) error {
return c.DescribeEngineDefaultParametersPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeEngineDefaultParametersPagesWithContext same as DescribeEngineDefaultParametersPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeEngineDefaultParametersPagesWithContext(ctx aws.Context, input *DescribeEngineDefaultParametersInput, fn func(*DescribeEngineDefaultParametersOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeEngineDefaultParametersInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeEngineDefaultParametersRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeEngineDefaultParametersOutput), !p.HasNextPage())
}
return p.Err()
}
const opDescribeEvents = "DescribeEvents"
// DescribeEventsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeEvents operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeEvents for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DescribeEvents method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeEventsRequest method.
// req, resp := client.DescribeEventsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEvents
func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) {
op := &request.Operation{
Name: opDescribeEvents,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeEventsInput{}
}
output = &DescribeEventsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeEvents API operation for Amazon ElastiCache.
//
// Returns events related to cache clusters, cache security groups, and cache
// parameter groups. You can obtain events specific to a particular cache cluster,
// cache security group, or cache parameter group by providing the name as a
// parameter.
//
// By default, only the events occurring within the last hour are returned;
// however, you can retrieve up to 14 days' worth of events if necessary.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeEvents for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEvents
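//
// A minimal sketch that retrieves one day's worth of events for a single
// cache cluster, assuming a configured service client named client; the
// identifier "my-cluster" and the 24-hour window are illustrative values.
//
//    out, err := client.DescribeEvents(&DescribeEventsInput{
//        SourceIdentifier: aws.String("my-cluster"), // hypothetical cluster ID
//        SourceType:       aws.String("cache-cluster"),
//        StartTime:        aws.Time(time.Now().Add(-24 * time.Hour)),
//        EndTime:          aws.Time(time.Now()),
//    })
//    if err == nil {
//        for _, ev := range out.Events {
//            fmt.Println(ev.Date, aws.StringValue(ev.Message))
//        }
//    }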
func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, error) {
req, out := c.DescribeEventsRequest(input)
return out, req.Send()
}
// DescribeEventsWithContext is the same as DescribeEvents with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeEvents for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeEventsWithContext(ctx aws.Context, input *DescribeEventsInput, opts ...request.Option) (*DescribeEventsOutput, error) {
req, out := c.DescribeEventsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeEventsPages iterates over the pages of a DescribeEvents operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeEvents method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeEvents operation.
// pageNum := 0
// err := client.DescribeEventsPages(params,
// func(page *DescribeEventsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
//            return pageNum <= 3
//        })
//
func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool) error {
	return c.DescribeEventsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeEventsPagesWithContext same as DescribeEventsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeEventsPagesWithContext(ctx aws.Context, input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeEventsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeEventsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeEventsOutput), !p.HasNextPage())
}
return p.Err()
}
const opDescribeReplicationGroups = "DescribeReplicationGroups"
// DescribeReplicationGroupsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeReplicationGroups operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeReplicationGroups for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DescribeReplicationGroups method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeReplicationGroupsRequest method.
// req, resp := client.DescribeReplicationGroupsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroups
func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) {
op := &request.Operation{
Name: opDescribeReplicationGroups,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeReplicationGroupsInput{}
}
output = &DescribeReplicationGroupsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeReplicationGroups API operation for Amazon ElastiCache.
//
// Returns information about a particular replication group. If no identifier
// is specified, DescribeReplicationGroups returns information about all replication
// groups.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeReplicationGroups for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroups
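//
// A minimal sketch, assuming a configured service client named client; the
// ID "my-repl-group" is an illustrative value. Leaving ReplicationGroupId
// unset returns every replication group in the account.
//
//    out, err := client.DescribeReplicationGroups(&DescribeReplicationGroupsInput{
//        ReplicationGroupId: aws.String("my-repl-group"),
//    })
//    if err == nil && len(out.ReplicationGroups) > 0 {
//        fmt.Println(aws.StringValue(out.ReplicationGroups[0].Status))
//    }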
func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroupsInput) (*DescribeReplicationGroupsOutput, error) {
req, out := c.DescribeReplicationGroupsRequest(input)
return out, req.Send()
}
// DescribeReplicationGroupsWithContext is the same as DescribeReplicationGroups with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeReplicationGroups for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeReplicationGroupsWithContext(ctx aws.Context, input *DescribeReplicationGroupsInput, opts ...request.Option) (*DescribeReplicationGroupsOutput, error) {
req, out := c.DescribeReplicationGroupsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeReplicationGroupsPages iterates over the pages of a DescribeReplicationGroups operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeReplicationGroups method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeReplicationGroups operation.
// pageNum := 0
// err := client.DescribeReplicationGroupsPages(params,
// func(page *DescribeReplicationGroupsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(*DescribeReplicationGroupsOutput, bool) bool) error {
return c.DescribeReplicationGroupsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeReplicationGroupsPagesWithContext same as DescribeReplicationGroupsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeReplicationGroupsPagesWithContext(ctx aws.Context, input *DescribeReplicationGroupsInput, fn func(*DescribeReplicationGroupsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeReplicationGroupsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeReplicationGroupsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeReplicationGroupsOutput), !p.HasNextPage())
}
return p.Err()
}
const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes"
// DescribeReservedCacheNodesRequest generates a "aws/request.Request" representing the
// client's request for the DescribeReservedCacheNodes operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeReservedCacheNodes for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DescribeReservedCacheNodes method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeReservedCacheNodesRequest method.
// req, resp := client.DescribeReservedCacheNodesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodes
func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) {
op := &request.Operation{
Name: opDescribeReservedCacheNodes,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeReservedCacheNodesInput{}
}
output = &DescribeReservedCacheNodesOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeReservedCacheNodes API operation for Amazon ElastiCache.
//
// Returns information about reserved cache nodes for this account, or about
// a specified reserved cache node.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeReservedCacheNodes for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReservedCacheNodeNotFoundFault "ReservedCacheNodeNotFound"
// The requested reserved cache node was not found.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodes
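//
// A minimal sketch that filters the account's reservations by node type,
// assuming a configured service client named client; "cache.m3.medium" is an
// illustrative value.
//
//    out, err := client.DescribeReservedCacheNodes(&DescribeReservedCacheNodesInput{
//        CacheNodeType: aws.String("cache.m3.medium"),
//    })
//    if err == nil {
//        for _, rn := range out.ReservedCacheNodes {
//            fmt.Println(aws.StringValue(rn.ReservedCacheNodeId), aws.StringValue(rn.State))
//        }
//    }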
func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNodesInput) (*DescribeReservedCacheNodesOutput, error) {
req, out := c.DescribeReservedCacheNodesRequest(input)
return out, req.Send()
}
// DescribeReservedCacheNodesWithContext is the same as DescribeReservedCacheNodes with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeReservedCacheNodes for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeReservedCacheNodesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesInput, opts ...request.Option) (*DescribeReservedCacheNodesOutput, error) {
req, out := c.DescribeReservedCacheNodesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeReservedCacheNodesPages iterates over the pages of a DescribeReservedCacheNodes operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeReservedCacheNodes method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeReservedCacheNodes operation.
// pageNum := 0
// err := client.DescribeReservedCacheNodesPages(params,
// func(page *DescribeReservedCacheNodesOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(*DescribeReservedCacheNodesOutput, bool) bool) error {
return c.DescribeReservedCacheNodesPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeReservedCacheNodesPagesWithContext same as DescribeReservedCacheNodesPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeReservedCacheNodesPagesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesInput, fn func(*DescribeReservedCacheNodesOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeReservedCacheNodesInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeReservedCacheNodesRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeReservedCacheNodesOutput), !p.HasNextPage())
}
return p.Err()
}
const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings"
// DescribeReservedCacheNodesOfferingsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeReservedCacheNodesOfferings operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeReservedCacheNodesOfferings for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DescribeReservedCacheNodesOfferings method
// directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeReservedCacheNodesOfferingsRequest method.
// req, resp := client.DescribeReservedCacheNodesOfferingsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferings
func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) {
op := &request.Operation{
Name: opDescribeReservedCacheNodesOfferings,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeReservedCacheNodesOfferingsInput{}
}
output = &DescribeReservedCacheNodesOfferingsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeReservedCacheNodesOfferings API operation for Amazon ElastiCache.
//
// Lists available reserved cache node offerings.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeReservedCacheNodesOfferings for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReservedCacheNodesOfferingNotFoundFault "ReservedCacheNodesOfferingNotFound"
// The requested cache node offering does not exist.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferings
func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReservedCacheNodesOfferingsInput) (*DescribeReservedCacheNodesOfferingsOutput, error) {
req, out := c.DescribeReservedCacheNodesOfferingsRequest(input)
return out, req.Send()
}
// DescribeReservedCacheNodesOfferingsWithContext is the same as DescribeReservedCacheNodesOfferings with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeReservedCacheNodesOfferings for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeReservedCacheNodesOfferingsWithContext(ctx aws.Context, input *DescribeReservedCacheNodesOfferingsInput, opts ...request.Option) (*DescribeReservedCacheNodesOfferingsOutput, error) {
req, out := c.DescribeReservedCacheNodesOfferingsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeReservedCacheNodesOfferingsPages iterates over the pages of a DescribeReservedCacheNodesOfferings operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeReservedCacheNodesOfferings method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeReservedCacheNodesOfferings operation.
// pageNum := 0
// err := client.DescribeReservedCacheNodesOfferingsPages(params,
// func(page *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(*DescribeReservedCacheNodesOfferingsOutput, bool) bool) error {
return c.DescribeReservedCacheNodesOfferingsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeReservedCacheNodesOfferingsPagesWithContext same as DescribeReservedCacheNodesOfferingsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPagesWithContext(ctx aws.Context, input *DescribeReservedCacheNodesOfferingsInput, fn func(*DescribeReservedCacheNodesOfferingsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeReservedCacheNodesOfferingsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeReservedCacheNodesOfferingsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeReservedCacheNodesOfferingsOutput), !p.HasNextPage())
}
return p.Err()
}
const opDescribeSnapshots = "DescribeSnapshots"
// DescribeSnapshotsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeSnapshots operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeSnapshots for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the DescribeSnapshots method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the DescribeSnapshotsRequest method.
// req, resp := client.DescribeSnapshotsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots
func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) {
op := &request.Operation{
Name: opDescribeSnapshots,
HTTPMethod: "POST",
HTTPPath: "/",
Paginator: &request.Paginator{
InputTokens: []string{"Marker"},
OutputTokens: []string{"Marker"},
LimitToken: "MaxRecords",
TruncationToken: "",
},
}
if input == nil {
input = &DescribeSnapshotsInput{}
}
output = &DescribeSnapshotsOutput{}
req = c.newRequest(op, input, output)
return
}
// DescribeSnapshots API operation for Amazon ElastiCache.
//
// Returns information about cache cluster or replication group snapshots. By
// default, DescribeSnapshots lists all of your snapshots; it can optionally
// describe a single snapshot, or just the snapshots associated with a particular
// cache cluster.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation DescribeSnapshots for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault"
// The requested snapshot name does not refer to an existing snapshot.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshots
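//
// A minimal sketch that narrows the listing to a single cluster's snapshots,
// assuming a configured service client named client; "my-cluster" is an
// illustrative value.
//
//    out, err := client.DescribeSnapshots(&DescribeSnapshotsInput{
//        CacheClusterId: aws.String("my-cluster"),
//    })
//    if err == nil {
//        for _, s := range out.Snapshots {
//            fmt.Println(aws.StringValue(s.SnapshotName), aws.StringValue(s.SnapshotStatus))
//        }
//    }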
func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) {
req, out := c.DescribeSnapshotsRequest(input)
return out, req.Send()
}
// DescribeSnapshotsWithContext is the same as DescribeSnapshots with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeSnapshots for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeSnapshotsWithContext(ctx aws.Context, input *DescribeSnapshotsInput, opts ...request.Option) (*DescribeSnapshotsOutput, error) {
req, out := c.DescribeSnapshotsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation,
// calling the "fn" function with the response data for each page. To stop
// iterating, return false from the fn function.
//
// See DescribeSnapshots method for more information on how to use this operation.
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a DescribeSnapshots operation.
// pageNum := 0
// err := client.DescribeSnapshotsPages(params,
// func(page *DescribeSnapshotsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool) error {
return c.DescribeSnapshotsPagesWithContext(aws.BackgroundContext(), input, fn)
}
// DescribeSnapshotsPagesWithContext same as DescribeSnapshotsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) DescribeSnapshotsPagesWithContext(ctx aws.Context, input *DescribeSnapshotsInput, fn func(*DescribeSnapshotsOutput, bool) bool, opts ...request.Option) error {
p := request.Pagination{
NewRequest: func() (*request.Request, error) {
var inCpy *DescribeSnapshotsInput
if input != nil {
tmp := *input
inCpy = &tmp
}
req, _ := c.DescribeSnapshotsRequest(inCpy)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return req, nil
},
}
cont := true
for p.Next() && cont {
cont = fn(p.Page().(*DescribeSnapshotsOutput), !p.HasNextPage())
}
return p.Err()
}
const opListAllowedNodeTypeModifications = "ListAllowedNodeTypeModifications"
// ListAllowedNodeTypeModificationsRequest generates a "aws/request.Request" representing the
// client's request for the ListAllowedNodeTypeModifications operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ListAllowedNodeTypeModifications for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the ListAllowedNodeTypeModifications method
// directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ListAllowedNodeTypeModificationsRequest method.
// req, resp := client.ListAllowedNodeTypeModificationsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModifications
func (c *ElastiCache) ListAllowedNodeTypeModificationsRequest(input *ListAllowedNodeTypeModificationsInput) (req *request.Request, output *ListAllowedNodeTypeModificationsOutput) {
op := &request.Operation{
Name: opListAllowedNodeTypeModifications,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListAllowedNodeTypeModificationsInput{}
}
output = &ListAllowedNodeTypeModificationsOutput{}
req = c.newRequest(op, input, output)
return
}
// ListAllowedNodeTypeModifications API operation for Amazon ElastiCache.
//
// Lists all available node types that you can scale your Redis cluster's or
// replication group's current node type up to.
//
// When you use the ModifyCacheCluster or ModifyReplicationGroup operations
// to scale up your cluster or replication group, the value of the CacheNodeType
// parameter must be one of the node types returned by this operation.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ListAllowedNodeTypeModifications for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModifications
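//
// A minimal sketch of the scale-up workflow described above, assuming a
// configured service client named client; "my-cluster" is an illustrative
// value. One of the returned node types becomes a valid CacheNodeType for a
// subsequent ModifyCacheCluster call.
//
//    mods, err := client.ListAllowedNodeTypeModifications(&ListAllowedNodeTypeModificationsInput{
//        CacheClusterId: aws.String("my-cluster"),
//    })
//    if err == nil && len(mods.ScaleUpModifications) > 0 {
//        _, err = client.ModifyCacheCluster(&ModifyCacheClusterInput{
//            CacheClusterId:   aws.String("my-cluster"),
//            CacheNodeType:    mods.ScaleUpModifications[0], // an allowed target type
//            ApplyImmediately: aws.Bool(true),
//        })
//    }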
func (c *ElastiCache) ListAllowedNodeTypeModifications(input *ListAllowedNodeTypeModificationsInput) (*ListAllowedNodeTypeModificationsOutput, error) {
req, out := c.ListAllowedNodeTypeModificationsRequest(input)
return out, req.Send()
}
// ListAllowedNodeTypeModificationsWithContext is the same as ListAllowedNodeTypeModifications with the addition of
// the ability to pass a context and additional request options.
//
// See ListAllowedNodeTypeModifications for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ListAllowedNodeTypeModificationsWithContext(ctx aws.Context, input *ListAllowedNodeTypeModificationsInput, opts ...request.Option) (*ListAllowedNodeTypeModificationsOutput, error) {
req, out := c.ListAllowedNodeTypeModificationsRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opListTagsForResource = "ListTagsForResource"
// ListTagsForResourceRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsForResource operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ListTagsForResource for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the ListTagsForResource method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ListTagsForResourceRequest method.
// req, resp := client.ListTagsForResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResource
func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) {
op := &request.Operation{
Name: opListTagsForResource,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ListTagsForResourceInput{}
}
output = &TagListMessage{}
req = c.newRequest(op, input, output)
return
}
// ListTagsForResource API operation for Amazon ElastiCache.
//
// Lists all cost allocation tags currently on the named resource. A cost allocation
// tag is a key-value pair where the key is case-sensitive and the value is
// optional. You can use cost allocation tags to categorize and track your AWS
// costs.
//
// You can have a maximum of 50 cost allocation tags on an ElastiCache resource.
// For more information, see Using Cost Allocation Tags in Amazon ElastiCache
// (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/BestPractices.html).
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ListTagsForResource for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault"
// The requested snapshot name does not refer to an existing snapshot.
//
// * ErrCodeInvalidARNFault "InvalidARN"
// The requested Amazon Resource Name (ARN) does not refer to an existing resource.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResource
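//
// A minimal sketch, assuming a configured service client named client; the
// ARN below is an illustrative placeholder for a real cluster ARN.
//
//    out, err := client.ListTagsForResource(&ListTagsForResourceInput{
//        ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:my-cluster"),
//    })
//    if err == nil {
//        for _, t := range out.TagList {
//            fmt.Println(aws.StringValue(t.Key), "=", aws.StringValue(t.Value))
//        }
//    }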
func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*TagListMessage, error) {
req, out := c.ListTagsForResourceRequest(input)
return out, req.Send()
}
// ListTagsForResourceWithContext is the same as ListTagsForResource with the addition of
// the ability to pass a context and additional request options.
//
// See ListTagsForResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ListTagsForResourceWithContext(ctx aws.Context, input *ListTagsForResourceInput, opts ...request.Option) (*TagListMessage, error) {
req, out := c.ListTagsForResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opModifyCacheCluster = "ModifyCacheCluster"
// ModifyCacheClusterRequest generates a "aws/request.Request" representing the
// client's request for the ModifyCacheCluster operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ModifyCacheCluster for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the ModifyCacheCluster method directly instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ModifyCacheClusterRequest method.
// req, resp := client.ModifyCacheClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheCluster
func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) {
op := &request.Operation{
Name: opModifyCacheCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ModifyCacheClusterInput{}
}
output = &ModifyCacheClusterOutput{}
req = c.newRequest(op, input, output)
return
}
// ModifyCacheCluster API operation for Amazon ElastiCache.
//
// Modifies the settings for a cache cluster. You can use this operation to
// change one or more cluster configuration parameters by specifying the parameters
// and the new values.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ModifyCacheCluster for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState"
// The current state of the cache security group does not allow deletion.
//
// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity"
// The requested cache node type is not available in the specified Availability
// Zone.
//
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes in a single cache cluster.
//
// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes per customer.
//
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
// The VPC network is in an invalid state.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheCluster
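//
// A minimal sketch that changes a single setting, assuming a configured
// service client named client; the cluster ID and node count are
// illustrative values. Without ApplyImmediately the change is deferred to
// the next maintenance window.
//
//    out, err := client.ModifyCacheCluster(&ModifyCacheClusterInput{
//        CacheClusterId:   aws.String("my-cluster"),
//        NumCacheNodes:    aws.Int64(3),
//        ApplyImmediately: aws.Bool(true),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.CacheCluster.CacheClusterStatus))
//    }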
func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*ModifyCacheClusterOutput, error) {
req, out := c.ModifyCacheClusterRequest(input)
return out, req.Send()
}
// ModifyCacheClusterWithContext is the same as ModifyCacheCluster with the addition of
// the ability to pass a context and additional request options.
//
// See ModifyCacheCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ModifyCacheClusterWithContext(ctx aws.Context, input *ModifyCacheClusterInput, opts ...request.Option) (*ModifyCacheClusterOutput, error) {
req, out := c.ModifyCacheClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opModifyCacheParameterGroup = "ModifyCacheParameterGroup"
// ModifyCacheParameterGroupRequest generates a "aws/request.Request" representing the
// client's request for the ModifyCacheParameterGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ModifyCacheParameterGroup for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the ModifyCacheParameterGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ModifyCacheParameterGroupRequest method.
// req, resp := client.ModifyCacheParameterGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroup
func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) {
op := &request.Operation{
Name: opModifyCacheParameterGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ModifyCacheParameterGroupInput{}
}
output = &CacheParameterGroupNameMessage{}
req = c.newRequest(op, input, output)
return
}
// ModifyCacheParameterGroup API operation for Amazon ElastiCache.
//
// Modifies the parameters of a cache parameter group. You can modify up to
// 20 parameters in a single request by submitting a list parameter name and
// value pairs.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ModifyCacheParameterGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState"
// The current state of the cache parameter group does not allow the requested
// operation to occur.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroup
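//
// A minimal sketch that submits a single name-value pair, assuming a
// configured service client named client; the group name and the Redis
// parameter shown are illustrative values.
//
//    out, err := client.ModifyCacheParameterGroup(&ModifyCacheParameterGroupInput{
//        CacheParameterGroupName: aws.String("my-param-group"),
//        ParameterNameValues: []*ParameterNameValue{{
//            ParameterName:  aws.String("maxmemory-policy"),
//            ParameterValue: aws.String("allkeys-lru"),
//        }},
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.CacheParameterGroupName))
//    }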
func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) {
req, out := c.ModifyCacheParameterGroupRequest(input)
return out, req.Send()
}
// ModifyCacheParameterGroupWithContext is the same as ModifyCacheParameterGroup with the addition of
// the ability to pass a context and additional request options.
//
// See ModifyCacheParameterGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ModifyCacheParameterGroupWithContext(ctx aws.Context, input *ModifyCacheParameterGroupInput, opts ...request.Option) (*CacheParameterGroupNameMessage, error) {
req, out := c.ModifyCacheParameterGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup"
// ModifyCacheSubnetGroupRequest generates a "aws/request.Request" representing the
// client's request for the ModifyCacheSubnetGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ModifyCacheSubnetGroup for usage and error information.
//
// Use this method when you want to inject custom logic into the request's
// lifecycle using a custom handler, or when you want to access properties on
// the request object before or after sending the request. If you just want
// the service response, call the ModifyCacheSubnetGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ModifyCacheSubnetGroupRequest method.
// req, resp := client.ModifyCacheSubnetGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroup
func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) {
op := &request.Operation{
Name: opModifyCacheSubnetGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ModifyCacheSubnetGroupInput{}
}
output = &ModifyCacheSubnetGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// ModifyCacheSubnetGroup API operation for Amazon ElastiCache.
//
// Modifies an existing cache subnet group.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ModifyCacheSubnetGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSubnetGroupNotFoundFault "CacheSubnetGroupNotFoundFault"
// The requested cache subnet group name does not refer to an existing cache
// subnet group.
//
// * ErrCodeCacheSubnetQuotaExceededFault "CacheSubnetQuotaExceededFault"
// The request cannot be processed because it would exceed the allowed number
// of subnets in a cache subnet group.
//
// * ErrCodeSubnetInUse "SubnetInUse"
// The requested subnet is being used by another cache subnet group.
//
// * ErrCodeInvalidSubnet "InvalidSubnet"
// An invalid subnet identifier was specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroup
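//
// A minimal sketch, assuming a configured service client named client; the
// group name and subnet IDs are illustrative values, and SubnetIds is
// assumed to carry the group's desired subnet list.
//
//    out, err := client.ModifyCacheSubnetGroup(&ModifyCacheSubnetGroupInput{
//        CacheSubnetGroupName: aws.String("my-subnet-group"),
//        SubnetIds: []*string{
//            aws.String("subnet-0123abcd"),
//            aws.String("subnet-4567efgh"),
//        },
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.CacheSubnetGroup.CacheSubnetGroupName))
//    }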
func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) (*ModifyCacheSubnetGroupOutput, error) {
req, out := c.ModifyCacheSubnetGroupRequest(input)
return out, req.Send()
}
// ModifyCacheSubnetGroupWithContext is the same as ModifyCacheSubnetGroup with the addition of
// the ability to pass a context and additional request options.
//
// See ModifyCacheSubnetGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ModifyCacheSubnetGroupWithContext(ctx aws.Context, input *ModifyCacheSubnetGroupInput, opts ...request.Option) (*ModifyCacheSubnetGroupOutput, error) {
req, out := c.ModifyCacheSubnetGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
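// exampleModifyCacheSubnetGroup is a minimal, illustrative sketch and not part
// of the generated API surface: it updates the description and subnet list of
// an existing cache subnet group. The group name and subnet ID below are
// hypothetical placeholders.
func exampleModifyCacheSubnetGroup(c *ElastiCache) (*ModifyCacheSubnetGroupOutput, error) {
	input := &ModifyCacheSubnetGroupInput{
		// CacheSubnetGroupName identifies the existing group to modify.
		CacheSubnetGroupName:        aws.String("my-subnet-group"), // hypothetical
		CacheSubnetGroupDescription: aws.String("updated description"),
		// Replaces the group's subnet list with the IDs given here.
		SubnetIds: []*string{aws.String("subnet-0123abcd")}, // hypothetical
	}
	return c.ModifyCacheSubnetGroup(input)
}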
const opModifyReplicationGroup = "ModifyReplicationGroup"
// ModifyReplicationGroupRequest generates a "aws/request.Request" representing the
// client's request for the ModifyReplicationGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ModifyReplicationGroup for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the ModifyReplicationGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ModifyReplicationGroupRequest method.
// req, resp := client.ModifyReplicationGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroup
func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) {
op := &request.Operation{
Name: opModifyReplicationGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ModifyReplicationGroupInput{}
}
output = &ModifyReplicationGroupOutput{}
req = c.newRequest(op, input, output)
return
}
// ModifyReplicationGroup API operation for Amazon ElastiCache.
//
// Modifies the settings for a replication group.
//
// Due to current limitations on Redis (cluster mode disabled), this operation
// or parameter is not supported on Redis (cluster mode enabled) replication
// groups.
//
// This operation is valid for Redis only.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ModifyReplicationGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
// The requested replication group is not in the available state.
//
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState"
// The current state of the cache security group does not allow deletion.
//
// * ErrCodeInsufficientCacheClusterCapacityFault "InsufficientCacheClusterCapacity"
// The requested cache node type is not available in the specified Availability
// Zone.
//
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeNodeQuotaForClusterExceededFault "NodeQuotaForClusterExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes in a single cache cluster.
//
// * ErrCodeNodeQuotaForCustomerExceededFault "NodeQuotaForCustomerExceeded"
// The request cannot be processed because it would exceed the allowed number
// of cache nodes per customer.
//
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidVPCNetworkStateFault "InvalidVPCNetworkStateFault"
// The VPC network is in an invalid state.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroup
func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) (*ModifyReplicationGroupOutput, error) {
req, out := c.ModifyReplicationGroupRequest(input)
return out, req.Send()
}
// ModifyReplicationGroupWithContext is the same as ModifyReplicationGroup with the addition of
// the ability to pass a context and additional request options.
//
// See ModifyReplicationGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ModifyReplicationGroupWithContext(ctx aws.Context, input *ModifyReplicationGroupInput, opts ...request.Option) (*ModifyReplicationGroupOutput, error) {
req, out := c.ModifyReplicationGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
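// exampleModifyReplicationGroup is an illustrative sketch, not generated code:
// it updates the description and snapshot retention of a replication group and
// applies the change immediately. The group ID is a hypothetical placeholder.
func exampleModifyReplicationGroup(c *ElastiCache) (*ModifyReplicationGroupOutput, error) {
	input := &ModifyReplicationGroupInput{
		ReplicationGroupId:          aws.String("my-repl-group"), // hypothetical
		ReplicationGroupDescription: aws.String("updated description"),
		SnapshotRetentionLimit:      aws.Int64(5),
		// Without ApplyImmediately the change waits for the next
		// maintenance window.
		ApplyImmediately: aws.Bool(true),
	}
	return c.ModifyReplicationGroup(input)
}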
const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering"
// PurchaseReservedCacheNodesOfferingRequest generates a "aws/request.Request" representing the
// client's request for the PurchaseReservedCacheNodesOffering operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See PurchaseReservedCacheNodesOffering for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the PurchaseReservedCacheNodesOffering method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the PurchaseReservedCacheNodesOfferingRequest method.
// req, resp := client.PurchaseReservedCacheNodesOfferingRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOffering
func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) {
op := &request.Operation{
Name: opPurchaseReservedCacheNodesOffering,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &PurchaseReservedCacheNodesOfferingInput{}
}
output = &PurchaseReservedCacheNodesOfferingOutput{}
req = c.newRequest(op, input, output)
return
}
// PurchaseReservedCacheNodesOffering API operation for Amazon ElastiCache.
//
// Allows you to purchase a reserved cache node offering.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation PurchaseReservedCacheNodesOffering for usage and error information.
//
// Returned Error Codes:
// * ErrCodeReservedCacheNodesOfferingNotFoundFault "ReservedCacheNodesOfferingNotFound"
// The requested cache node offering does not exist.
//
// * ErrCodeReservedCacheNodeAlreadyExistsFault "ReservedCacheNodeAlreadyExists"
// You already have a reservation with the given identifier.
//
// * ErrCodeReservedCacheNodeQuotaExceededFault "ReservedCacheNodeQuotaExceeded"
// The request cannot be processed because it would exceed the user's cache
// node quota.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOffering
func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReservedCacheNodesOfferingInput) (*PurchaseReservedCacheNodesOfferingOutput, error) {
req, out := c.PurchaseReservedCacheNodesOfferingRequest(input)
return out, req.Send()
}
// PurchaseReservedCacheNodesOfferingWithContext is the same as PurchaseReservedCacheNodesOffering with the addition of
// the ability to pass a context and additional request options.
//
// See PurchaseReservedCacheNodesOffering for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) PurchaseReservedCacheNodesOfferingWithContext(ctx aws.Context, input *PurchaseReservedCacheNodesOfferingInput, opts ...request.Option) (*PurchaseReservedCacheNodesOfferingOutput, error) {
req, out := c.PurchaseReservedCacheNodesOfferingRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
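// examplePurchaseReservedCacheNodesOffering is an illustrative sketch, not
// generated code. The offering ID below is a hypothetical placeholder; real
// IDs come from the DescribeReservedCacheNodesOfferings operation.
func examplePurchaseReservedCacheNodesOffering(c *ElastiCache) (*PurchaseReservedCacheNodesOfferingOutput, error) {
	input := &PurchaseReservedCacheNodesOfferingInput{
		ReservedCacheNodesOfferingId: aws.String("438012d3-4052-4cc7-b2e3-8d3372e0e706"), // hypothetical
		CacheNodeCount:               aws.Int64(1),
	}
	return c.PurchaseReservedCacheNodesOffering(input)
}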
const opRebootCacheCluster = "RebootCacheCluster"
// RebootCacheClusterRequest generates a "aws/request.Request" representing the
// client's request for the RebootCacheCluster operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See RebootCacheCluster for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the RebootCacheCluster method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the RebootCacheClusterRequest method.
// req, resp := client.RebootCacheClusterRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheCluster
func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) {
op := &request.Operation{
Name: opRebootCacheCluster,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &RebootCacheClusterInput{}
}
output = &RebootCacheClusterOutput{}
req = c.newRequest(op, input, output)
return
}
// RebootCacheCluster API operation for Amazon ElastiCache.
//
// Reboots some, or all, of the cache nodes within a provisioned cache cluster.
// This operation applies any modified cache parameter groups to the cache cluster.
// The reboot operation takes place as soon as possible, and results in a momentary
// outage to the cache cluster. During the reboot, the cache cluster status
// is set to REBOOTING.
//
// The reboot causes the contents of the cache (for each cache node being rebooted)
// to be lost.
//
// When the reboot is complete, a cache cluster event is created.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation RebootCacheCluster for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheCluster
func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*RebootCacheClusterOutput, error) {
req, out := c.RebootCacheClusterRequest(input)
return out, req.Send()
}
// RebootCacheClusterWithContext is the same as RebootCacheCluster with the addition of
// the ability to pass a context and additional request options.
//
// See RebootCacheCluster for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) RebootCacheClusterWithContext(ctx aws.Context, input *RebootCacheClusterInput, opts ...request.Option) (*RebootCacheClusterOutput, error) {
req, out := c.RebootCacheClusterRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
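// exampleRebootCacheCluster is an illustrative sketch, not generated code: it
// reboots a single node in a cluster, which causes the momentary outage
// described in the operation's doc comment above. Both values are
// hypothetical placeholders.
func exampleRebootCacheCluster(c *ElastiCache) (*RebootCacheClusterOutput, error) {
	input := &RebootCacheClusterInput{
		CacheClusterId: aws.String("my-cluster"), // hypothetical
		// Node IDs are four-digit identifiers such as "0001".
		CacheNodeIdsToReboot: []*string{aws.String("0001")},
	}
	return c.RebootCacheCluster(input)
}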
const opRemoveTagsFromResource = "RemoveTagsFromResource"
// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the
// client's request for the RemoveTagsFromResource operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See RemoveTagsFromResource for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the RemoveTagsFromResource method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the RemoveTagsFromResourceRequest method.
// req, resp := client.RemoveTagsFromResourceRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResource
func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) {
op := &request.Operation{
Name: opRemoveTagsFromResource,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &RemoveTagsFromResourceInput{}
}
output = &TagListMessage{}
req = c.newRequest(op, input, output)
return
}
// RemoveTagsFromResource API operation for Amazon ElastiCache.
//
// Removes the tags identified by the TagKeys list from the named resource.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation RemoveTagsFromResource for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheClusterNotFoundFault "CacheClusterNotFound"
// The requested cache cluster ID does not refer to an existing cache cluster.
//
// * ErrCodeSnapshotNotFoundFault "SnapshotNotFoundFault"
// The requested snapshot name does not refer to an existing snapshot.
//
// * ErrCodeInvalidARNFault "InvalidARN"
// The requested Amazon Resource Name (ARN) does not refer to an existing resource.
//
// * ErrCodeTagNotFoundFault "TagNotFound"
// The requested tag was not found on this resource.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResource
func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*TagListMessage, error) {
req, out := c.RemoveTagsFromResourceRequest(input)
return out, req.Send()
}
// RemoveTagsFromResourceWithContext is the same as RemoveTagsFromResource with the addition of
// the ability to pass a context and additional request options.
//
// See RemoveTagsFromResource for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) RemoveTagsFromResourceWithContext(ctx aws.Context, input *RemoveTagsFromResourceInput, opts ...request.Option) (*TagListMessage, error) {
req, out := c.RemoveTagsFromResourceRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
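// exampleRemoveTagsFromResource is an illustrative sketch, not generated
// code: it removes two tags by key from a cluster ARN. The ARN follows the
// format documented on AddTagsToResourceInput; the tag keys are hypothetical
// placeholders.
func exampleRemoveTagsFromResource(c *ElastiCache) (*TagListMessage, error) {
	input := &RemoveTagsFromResourceInput{
		ResourceName: aws.String("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster"),
		TagKeys:      []*string{aws.String("cost-center"), aws.String("team")}, // hypothetical keys
	}
	return c.RemoveTagsFromResource(input)
}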
const opResetCacheParameterGroup = "ResetCacheParameterGroup"
// ResetCacheParameterGroupRequest generates a "aws/request.Request" representing the
// client's request for the ResetCacheParameterGroup operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See ResetCacheParameterGroup for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the ResetCacheParameterGroup method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the ResetCacheParameterGroupRequest method.
// req, resp := client.ResetCacheParameterGroupRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroup
func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) {
op := &request.Operation{
Name: opResetCacheParameterGroup,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &ResetCacheParameterGroupInput{}
}
output = &CacheParameterGroupNameMessage{}
req = c.newRequest(op, input, output)
return
}
// ResetCacheParameterGroup API operation for Amazon ElastiCache.
//
// Modifies the parameters of a cache parameter group to the engine or system
// default value. You can reset specific parameters by submitting a list of
// parameter names. To reset the entire cache parameter group, specify the ResetAllParameters
// and CacheParameterGroupName parameters.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation ResetCacheParameterGroup for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidCacheParameterGroupStateFault "InvalidCacheParameterGroupState"
// The current state of the cache parameter group does not allow the requested
// operation to occur.
//
// * ErrCodeCacheParameterGroupNotFoundFault "CacheParameterGroupNotFound"
// The requested cache parameter group name does not refer to an existing cache
// parameter group.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroup
func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupInput) (*CacheParameterGroupNameMessage, error) {
req, out := c.ResetCacheParameterGroupRequest(input)
return out, req.Send()
}
// ResetCacheParameterGroupWithContext is the same as ResetCacheParameterGroup with the addition of
// the ability to pass a context and additional request options.
//
// See ResetCacheParameterGroup for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) ResetCacheParameterGroupWithContext(ctx aws.Context, input *ResetCacheParameterGroupInput, opts ...request.Option) (*CacheParameterGroupNameMessage, error) {
req, out := c.ResetCacheParameterGroupRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
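// exampleResetCacheParameterGroup is an illustrative sketch, not generated
// code: it resets one named parameter to its default value, per the doc
// comment above. The group and parameter names are hypothetical placeholders.
func exampleResetCacheParameterGroup(c *ElastiCache) (*CacheParameterGroupNameMessage, error) {
	input := &ResetCacheParameterGroupInput{
		CacheParameterGroupName: aws.String("my-param-group"), // hypothetical
		// Listing parameters resets only those; set ResetAllParameters
		// instead to reset the entire group.
		ParameterNameValues: []*ParameterNameValue{
			{ParameterName: aws.String("maxmemory-policy")}, // hypothetical parameter
		},
	}
	return c.ResetCacheParameterGroup(input)
}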
const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress"
// RevokeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the
// client's request for the RevokeCacheSecurityGroupIngress operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See RevokeCacheSecurityGroupIngress for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the RevokeCacheSecurityGroupIngress method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the RevokeCacheSecurityGroupIngressRequest method.
// req, resp := client.RevokeCacheSecurityGroupIngressRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngress
func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) {
op := &request.Operation{
Name: opRevokeCacheSecurityGroupIngress,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &RevokeCacheSecurityGroupIngressInput{}
}
output = &RevokeCacheSecurityGroupIngressOutput{}
req = c.newRequest(op, input, output)
return
}
// RevokeCacheSecurityGroupIngress API operation for Amazon ElastiCache.
//
// Revokes ingress from a cache security group. Use this operation to disallow
// access from an Amazon EC2 security group that had been previously authorized.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation RevokeCacheSecurityGroupIngress for usage and error information.
//
// Returned Error Codes:
// * ErrCodeCacheSecurityGroupNotFoundFault "CacheSecurityGroupNotFound"
// The requested cache security group name does not refer to an existing cache
// security group.
//
// * ErrCodeAuthorizationNotFoundFault "AuthorizationNotFound"
// The specified Amazon EC2 security group is not authorized for the specified
// cache security group.
//
// * ErrCodeInvalidCacheSecurityGroupStateFault "InvalidCacheSecurityGroupState"
// The current state of the cache security group does not allow deletion.
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngress
func (c *ElastiCache) RevokeCacheSecurityGroupIngress(input *RevokeCacheSecurityGroupIngressInput) (*RevokeCacheSecurityGroupIngressOutput, error) {
req, out := c.RevokeCacheSecurityGroupIngressRequest(input)
return out, req.Send()
}
// RevokeCacheSecurityGroupIngressWithContext is the same as RevokeCacheSecurityGroupIngress with the addition of
// the ability to pass a context and additional request options.
//
// See RevokeCacheSecurityGroupIngress for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) RevokeCacheSecurityGroupIngressWithContext(ctx aws.Context, input *RevokeCacheSecurityGroupIngressInput, opts ...request.Option) (*RevokeCacheSecurityGroupIngressOutput, error) {
req, out := c.RevokeCacheSecurityGroupIngressRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
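// exampleRevokeCacheSecurityGroupIngress is an illustrative sketch, not
// generated code: it revokes the same triple of values a prior
// AuthorizeCacheSecurityGroupIngress call would have used. All three values
// are hypothetical placeholders.
func exampleRevokeCacheSecurityGroupIngress(c *ElastiCache) (*RevokeCacheSecurityGroupIngressOutput, error) {
	input := &RevokeCacheSecurityGroupIngressInput{
		CacheSecurityGroupName: aws.String("my-cache-sec-group"),
		EC2SecurityGroupName:   aws.String("my-ec2-sec-group"),
		// An AWS account number, not an access key ID (see the
		// AuthorizeCacheSecurityGroupIngressInput field docs).
		EC2SecurityGroupOwnerId: aws.String("123456789012"),
	}
	return c.RevokeCacheSecurityGroupIngress(input)
}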
const opTestFailover = "TestFailover"
// TestFailoverRequest generates a "aws/request.Request" representing the
// client's request for the TestFailover operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See TestFailover for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the TestFailover method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
// // Example sending a request using the TestFailoverRequest method.
// req, resp := client.TestFailoverRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover
func (c *ElastiCache) TestFailoverRequest(input *TestFailoverInput) (req *request.Request, output *TestFailoverOutput) {
op := &request.Operation{
Name: opTestFailover,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &TestFailoverInput{}
}
output = &TestFailoverOutput{}
req = c.newRequest(op, input, output)
return
}
// TestFailover API operation for Amazon ElastiCache.
//
// Represents the input of a TestFailover operation, which tests automatic failover
// on a specified node group (called shard in the console) in a replication
// group (called cluster in the console).
//
// Note the following:
//
// * A customer can use this operation to test automatic failover on up to
// 5 shards (called node groups in the ElastiCache API and AWS CLI) in any
// rolling 24-hour period.
//
// * If calling this operation on shards in different clusters (called replication
// groups in the API and CLI), the calls can be made concurrently.
//
// * If calling this operation multiple times on different shards in the
// same Redis (cluster mode enabled) replication group, the first node replacement
// must complete before a subsequent call can be made.
//
// * To determine whether the node replacement is complete, you can check
// Events using the Amazon ElastiCache console, the AWS CLI, or the ElastiCache
// API. Look for the following automatic failover related events, listed
// here in order of occurrence:
//
// Replication group message: Test Failover API called for node group <node-group-id>
//
// Cache cluster message: Failover from master node <primary-node-id> to replica
// node <node-id> completed
//
// Replication group message: Failover from master node <primary-node-id> to
// replica node <node-id> completed
//
// Cache cluster message: Recovering cache nodes <node-id>
//
// Cache cluster message: Finished recovery for cache nodes <node-id>
//
// For more information see:
//
// Viewing ElastiCache Events (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/ECEvents.Viewing.html)
// in the ElastiCache User Guide
//
// DescribeEvents (http://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_DescribeEvents.html)
// in the ElastiCache API Reference
//
// Also see Testing Multi-AZ with Automatic Failover (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/AutoFailover.html#auto-failover-test)
// in the ElastiCache User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon ElastiCache's
// API operation TestFailover for usage and error information.
//
// Returned Error Codes:
// * ErrCodeAPICallRateForCustomerExceededFault "APICallRateForCustomerExceeded"
// The customer has exceeded the allowed rate of API calls.
//
// * ErrCodeInvalidCacheClusterStateFault "InvalidCacheClusterState"
// The requested cache cluster is not in the available state.
//
// * ErrCodeInvalidReplicationGroupStateFault "InvalidReplicationGroupState"
// The requested replication group is not in the available state.
//
// * ErrCodeNodeGroupNotFoundFault "NodeGroupNotFoundFault"
// The node group specified by the NodeGroupId parameter could not be found.
// Please verify that the node group exists and that you spelled the NodeGroupId
// value correctly.
//
// * ErrCodeReplicationGroupNotFoundFault "ReplicationGroupNotFoundFault"
// The specified replication group does not exist.
//
// * ErrCodeTestFailoverNotAvailableFault "TestFailoverNotAvailableFault"
//
// * ErrCodeInvalidParameterValueException "InvalidParameterValue"
// The value for a parameter is invalid.
//
// * ErrCodeInvalidParameterCombinationException "InvalidParameterCombination"
// Two or more incompatible parameters were specified.
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailover
func (c *ElastiCache) TestFailover(input *TestFailoverInput) (*TestFailoverOutput, error) {
req, out := c.TestFailoverRequest(input)
return out, req.Send()
}
// TestFailoverWithContext is the same as TestFailover with the addition of
// the ability to pass a context and additional request options.
//
// See TestFailover for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *ElastiCache) TestFailoverWithContext(ctx aws.Context, input *TestFailoverInput, opts ...request.Option) (*TestFailoverOutput, error) {
req, out := c.TestFailoverRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}
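// exampleTestFailover is an illustrative sketch, not generated code: it
// triggers a failover test on one node group, passing a context so the call
// can be cancelled. Both IDs are hypothetical placeholders.
func exampleTestFailover(c *ElastiCache) (*TestFailoverOutput, error) {
	input := &TestFailoverInput{
		ReplicationGroupId: aws.String("my-repl-group"), // hypothetical
		NodeGroupId:        aws.String("0001"),          // hypothetical shard ID
	}
	// aws.BackgroundContext satisfies the non-nil context requirement noted
	// in the WithContext doc comment above.
	return c.TestFailoverWithContext(aws.BackgroundContext(), input)
}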
// Represents the input of an AddTagsToResource operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AddTagsToResourceMessage
type AddTagsToResourceInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the resource to which the tags are to be
// added, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
// or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.
//
// For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
// Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// ResourceName is a required field
ResourceName *string `type:"string" required:"true"`
// A list of cost allocation tags to be added to this resource. A tag is a key-value
// pair. A tag key must be accompanied by a tag value.
//
// Tags is a required field
Tags []*Tag `locationNameList:"Tag" type:"list" required:"true"`
}
// String returns the string representation
func (s AddTagsToResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AddTagsToResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *AddTagsToResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"}
if s.ResourceName == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceName"))
}
if s.Tags == nil {
invalidParams.Add(request.NewErrParamRequired("Tags"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceName sets the ResourceName field's value.
func (s *AddTagsToResourceInput) SetResourceName(v string) *AddTagsToResourceInput {
s.ResourceName = &v
return s
}
// SetTags sets the Tags field's value.
func (s *AddTagsToResourceInput) SetTags(v []*Tag) *AddTagsToResourceInput {
s.Tags = v
return s
}
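// exampleAddTagsToResourceInput is an illustrative sketch, not generated
// code: it builds the input with the fluent setters above and runs the same
// Validate check the SDK performs before sending. The ARN and tag values are
// hypothetical placeholders.
func exampleAddTagsToResourceInput() (*AddTagsToResourceInput, error) {
	input := (&AddTagsToResourceInput{}).
		SetResourceName("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster").
		SetTags([]*Tag{(&Tag{}).SetKey("cost-center").SetValue("1234")})
	// Validate reports missing required fields (ResourceName, Tags) before
	// any request is made.
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}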
// Represents the input of an AuthorizeCacheSecurityGroupIngress operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AuthorizeCacheSecurityGroupIngressMessage
type AuthorizeCacheSecurityGroupIngressInput struct {
_ struct{} `type:"structure"`
// The cache security group that allows network ingress.
//
// CacheSecurityGroupName is a required field
CacheSecurityGroupName *string `type:"string" required:"true"`
// The Amazon EC2 security group to be authorized for ingress to the cache security
// group.
//
// EC2SecurityGroupName is a required field
EC2SecurityGroupName *string `type:"string" required:"true"`
// The AWS account number of the Amazon EC2 security group owner. Note that
// this is not the same thing as an AWS access key ID - you must provide a valid
// AWS account number for this parameter.
//
// EC2SecurityGroupOwnerId is a required field
EC2SecurityGroupOwnerId *string `type:"string" required:"true"`
}
// String returns the string representation
func (s AuthorizeCacheSecurityGroupIngressInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AuthorizeCacheSecurityGroupIngressInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *AuthorizeCacheSecurityGroupIngressInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "AuthorizeCacheSecurityGroupIngressInput"}
if s.CacheSecurityGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName"))
}
if s.EC2SecurityGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupName"))
}
if s.EC2SecurityGroupOwnerId == nil {
invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupOwnerId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *AuthorizeCacheSecurityGroupIngressInput) SetCacheSecurityGroupName(v string) *AuthorizeCacheSecurityGroupIngressInput {
s.CacheSecurityGroupName = &v
return s
}
// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value.
func (s *AuthorizeCacheSecurityGroupIngressInput) SetEC2SecurityGroupName(v string) *AuthorizeCacheSecurityGroupIngressInput {
s.EC2SecurityGroupName = &v
return s
}
// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value.
func (s *AuthorizeCacheSecurityGroupIngressInput) SetEC2SecurityGroupOwnerId(v string) *AuthorizeCacheSecurityGroupIngressInput {
s.EC2SecurityGroupOwnerId = &v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AuthorizeCacheSecurityGroupIngressResult
type AuthorizeCacheSecurityGroupIngressOutput struct {
_ struct{} `type:"structure"`
// Represents the output of one of the following operations:
//
// * AuthorizeCacheSecurityGroupIngress
//
// * CreateCacheSecurityGroup
//
// * RevokeCacheSecurityGroupIngress
CacheSecurityGroup *CacheSecurityGroup `type:"structure"`
}
// String returns the string representation
func (s AuthorizeCacheSecurityGroupIngressOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AuthorizeCacheSecurityGroupIngressOutput) GoString() string {
return s.String()
}
// SetCacheSecurityGroup sets the CacheSecurityGroup field's value.
func (s *AuthorizeCacheSecurityGroupIngressOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *AuthorizeCacheSecurityGroupIngressOutput {
s.CacheSecurityGroup = v
return s
}
// Describes an Availability Zone in which the cache cluster is launched.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AvailabilityZone
type AvailabilityZone struct {
_ struct{} `type:"structure"`
// The name of the Availability Zone.
Name *string `type:"string"`
}
// String returns the string representation
func (s AvailabilityZone) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s AvailabilityZone) GoString() string {
return s.String()
}
// SetName sets the Name field's value.
func (s *AvailabilityZone) SetName(v string) *AvailabilityZone {
s.Name = &v
return s
}
// Contains all of the attributes of a specific cache cluster.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheCluster
type CacheCluster struct {
_ struct{} `type:"structure"`
// This parameter is currently disabled.
AutoMinorVersionUpgrade *bool `type:"boolean"`
// The date and time when the cache cluster was created.
CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The user-supplied identifier of the cache cluster. This identifier is a unique
// key that identifies a cache cluster.
CacheClusterId *string `type:"string"`
// The current state of this cache cluster, one of the following values: available,
// creating, deleted, deleting, incompatible-network, modifying, rebooting cache
// cluster nodes, restore-failed, or snapshotting.
CacheClusterStatus *string `type:"string"`
// The name of the compute and memory capacity node type for the cache cluster.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// A list of cache nodes that are members of the cache cluster.
CacheNodes []*CacheNode `locationNameList:"CacheNode" type:"list"`
// Status of the cache parameter group.
CacheParameterGroup *CacheParameterGroupStatus `type:"structure"`
// A list of cache security group elements, composed of name and status sub-elements.
CacheSecurityGroups []*CacheSecurityGroupMembership `locationNameList:"CacheSecurityGroup" type:"list"`
// The name of the cache subnet group associated with the cache cluster.
CacheSubnetGroupName *string `type:"string"`
// The URL of the web page where you can download the latest ElastiCache client
// library.
ClientDownloadLandingPage *string `type:"string"`
// Represents a Memcached cluster endpoint which, if Automatic Discovery is
// enabled on the cluster, can be used by an application to connect to any node
// in the cluster. The configuration endpoint will always have .cfg in it.
//
// Example: mem-3.9dvc4r.cfg.usw2.cache.amazonaws.com:11211
ConfigurationEndpoint *Endpoint `type:"structure"`
// The name of the cache engine (memcached or redis) to be used for this cache
// cluster.
Engine *string `type:"string"`
// The version of the cache engine that is used in this cache cluster.
EngineVersion *string `type:"string"`
// Describes a notification topic and its status. Notification topics are used
// for publishing ElastiCache events to subscribers using Amazon Simple Notification
// Service (SNS).
NotificationConfiguration *NotificationConfiguration `type:"structure"`
// The number of cache nodes in the cache cluster.
//
// For clusters running Redis, this value must be 1. For clusters running Memcached,
// this value must be between 1 and 20.
NumCacheNodes *int64 `type:"integer"`
// A group of settings that are applied to the cache cluster in the future,
// or that are currently being applied.
PendingModifiedValues *PendingModifiedValues `type:"structure"`
// The name of the Availability Zone in which the cache cluster is located or
// "Multiple" if the cache nodes are located in different Availability Zones.
PreferredAvailabilityZone *string `type:"string"`
// Specifies the weekly time range during which maintenance on the cluster is
// performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
//
// Valid values for ddd are:
//
// * sun
//
// * mon
//
// * tue
//
// * wed
//
// * thu
//
// * fri
//
// * sat
//
// Example: sun:23:00-mon:01:30
PreferredMaintenanceWindow *string `type:"string"`
// The replication group to which this cache cluster belongs. If this field
// is empty, the cache cluster is not associated with any replication group.
ReplicationGroupId *string `type:"string"`
// A list of VPC Security Groups associated with the cache cluster.
SecurityGroups []*SecurityGroupMembership `type:"list"`
// The number of days for which ElastiCache retains automatic cache cluster
// snapshots before deleting them. For example, if you set SnapshotRetentionLimit
// to 5, a snapshot that was taken today is retained for 5 days before being
// deleted.
//
// If the value of SnapshotRetentionLimit is set to zero (0), backups are turned
// off.
SnapshotRetentionLimit *int64 `type:"integer"`
// The daily time range (in UTC) during which ElastiCache begins taking a daily
// snapshot of your cache cluster.
//
// Example: 05:00-09:00
SnapshotWindow *string `type:"string"`
}
// String returns the string representation
func (s CacheCluster) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheCluster) GoString() string {
return s.String()
}
// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value.
func (s *CacheCluster) SetAutoMinorVersionUpgrade(v bool) *CacheCluster {
s.AutoMinorVersionUpgrade = &v
return s
}
// SetCacheClusterCreateTime sets the CacheClusterCreateTime field's value.
func (s *CacheCluster) SetCacheClusterCreateTime(v time.Time) *CacheCluster {
s.CacheClusterCreateTime = &v
return s
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *CacheCluster) SetCacheClusterId(v string) *CacheCluster {
s.CacheClusterId = &v
return s
}
// SetCacheClusterStatus sets the CacheClusterStatus field's value.
func (s *CacheCluster) SetCacheClusterStatus(v string) *CacheCluster {
s.CacheClusterStatus = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *CacheCluster) SetCacheNodeType(v string) *CacheCluster {
s.CacheNodeType = &v
return s
}
// SetCacheNodes sets the CacheNodes field's value.
func (s *CacheCluster) SetCacheNodes(v []*CacheNode) *CacheCluster {
s.CacheNodes = v
return s
}
// SetCacheParameterGroup sets the CacheParameterGroup field's value.
func (s *CacheCluster) SetCacheParameterGroup(v *CacheParameterGroupStatus) *CacheCluster {
s.CacheParameterGroup = v
return s
}
// SetCacheSecurityGroups sets the CacheSecurityGroups field's value.
func (s *CacheCluster) SetCacheSecurityGroups(v []*CacheSecurityGroupMembership) *CacheCluster {
s.CacheSecurityGroups = v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *CacheCluster) SetCacheSubnetGroupName(v string) *CacheCluster {
s.CacheSubnetGroupName = &v
return s
}
// SetClientDownloadLandingPage sets the ClientDownloadLandingPage field's value.
func (s *CacheCluster) SetClientDownloadLandingPage(v string) *CacheCluster {
s.ClientDownloadLandingPage = &v
return s
}
// SetConfigurationEndpoint sets the ConfigurationEndpoint field's value.
func (s *CacheCluster) SetConfigurationEndpoint(v *Endpoint) *CacheCluster {
s.ConfigurationEndpoint = v
return s
}
// SetEngine sets the Engine field's value.
func (s *CacheCluster) SetEngine(v string) *CacheCluster {
s.Engine = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *CacheCluster) SetEngineVersion(v string) *CacheCluster {
s.EngineVersion = &v
return s
}
// SetNotificationConfiguration sets the NotificationConfiguration field's value.
func (s *CacheCluster) SetNotificationConfiguration(v *NotificationConfiguration) *CacheCluster {
s.NotificationConfiguration = v
return s
}
// SetNumCacheNodes sets the NumCacheNodes field's value.
func (s *CacheCluster) SetNumCacheNodes(v int64) *CacheCluster {
s.NumCacheNodes = &v
return s
}
// SetPendingModifiedValues sets the PendingModifiedValues field's value.
func (s *CacheCluster) SetPendingModifiedValues(v *PendingModifiedValues) *CacheCluster {
s.PendingModifiedValues = v
return s
}
// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value.
func (s *CacheCluster) SetPreferredAvailabilityZone(v string) *CacheCluster {
s.PreferredAvailabilityZone = &v
return s
}
// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
func (s *CacheCluster) SetPreferredMaintenanceWindow(v string) *CacheCluster {
s.PreferredMaintenanceWindow = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *CacheCluster) SetReplicationGroupId(v string) *CacheCluster {
s.ReplicationGroupId = &v
return s
}
// SetSecurityGroups sets the SecurityGroups field's value.
func (s *CacheCluster) SetSecurityGroups(v []*SecurityGroupMembership) *CacheCluster {
s.SecurityGroups = v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *CacheCluster) SetSnapshotRetentionLimit(v int64) *CacheCluster {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *CacheCluster) SetSnapshotWindow(v string) *CacheCluster {
s.SnapshotWindow = &v
return s
}
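// cacheClusterIsAvailable is an illustrative helper, not generated code: it
// shows safe reads of the pointer-typed fields above using the aws value
// helpers; "available" is one of the documented CacheClusterStatus values.
func cacheClusterIsAvailable(cc *CacheCluster) bool {
	return cc != nil && aws.StringValue(cc.CacheClusterStatus) == "available"
}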
// Provides all of the details about a particular cache engine version.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheEngineVersion
type CacheEngineVersion struct {
_ struct{} `type:"structure"`
// The description of the cache engine.
CacheEngineDescription *string `type:"string"`
// The description of the cache engine version.
CacheEngineVersionDescription *string `type:"string"`
// The name of the cache parameter group family associated with this cache engine.
//
// Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2
CacheParameterGroupFamily *string `type:"string"`
// The name of the cache engine.
Engine *string `type:"string"`
// The version number of the cache engine.
EngineVersion *string `type:"string"`
}
// String returns the string representation
func (s CacheEngineVersion) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheEngineVersion) GoString() string {
return s.String()
}
// SetCacheEngineDescription sets the CacheEngineDescription field's value.
func (s *CacheEngineVersion) SetCacheEngineDescription(v string) *CacheEngineVersion {
s.CacheEngineDescription = &v
return s
}
// SetCacheEngineVersionDescription sets the CacheEngineVersionDescription field's value.
func (s *CacheEngineVersion) SetCacheEngineVersionDescription(v string) *CacheEngineVersion {
s.CacheEngineVersionDescription = &v
return s
}
// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value.
func (s *CacheEngineVersion) SetCacheParameterGroupFamily(v string) *CacheEngineVersion {
s.CacheParameterGroupFamily = &v
return s
}
// SetEngine sets the Engine field's value.
func (s *CacheEngineVersion) SetEngine(v string) *CacheEngineVersion {
s.Engine = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *CacheEngineVersion) SetEngineVersion(v string) *CacheEngineVersion {
s.EngineVersion = &v
return s
}
// Represents an individual cache node within a cache cluster. Each cache node
// runs its own instance of the cluster's protocol-compliant caching software
// - either Memcached or Redis.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheNode
type CacheNode struct {
_ struct{} `type:"structure"`
// The date and time when the cache node was created.
CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The cache node identifier. A node ID is a numeric identifier (0001, 0002,
// etc.). The combination of cluster ID and node ID uniquely identifies every
// cache node used in a customer's AWS account.
CacheNodeId *string `type:"string"`
// The current state of this cache node.
CacheNodeStatus *string `type:"string"`
// The Availability Zone where this node was created and now resides.
CustomerAvailabilityZone *string `type:"string"`
// The hostname for connecting to this cache node.
Endpoint *Endpoint `type:"structure"`
// The status of the parameter group applied to this cache node.
ParameterGroupStatus *string `type:"string"`
// The ID of the primary node to which this read replica node is synchronized.
// If this field is empty, this node is not associated with a primary cache
// cluster.
SourceCacheNodeId *string `type:"string"`
}
// String returns the string representation
func (s CacheNode) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheNode) GoString() string {
return s.String()
}
// SetCacheNodeCreateTime sets the CacheNodeCreateTime field's value.
func (s *CacheNode) SetCacheNodeCreateTime(v time.Time) *CacheNode {
s.CacheNodeCreateTime = &v
return s
}
// SetCacheNodeId sets the CacheNodeId field's value.
func (s *CacheNode) SetCacheNodeId(v string) *CacheNode {
s.CacheNodeId = &v
return s
}
// SetCacheNodeStatus sets the CacheNodeStatus field's value.
func (s *CacheNode) SetCacheNodeStatus(v string) *CacheNode {
s.CacheNodeStatus = &v
return s
}
// SetCustomerAvailabilityZone sets the CustomerAvailabilityZone field's value.
func (s *CacheNode) SetCustomerAvailabilityZone(v string) *CacheNode {
s.CustomerAvailabilityZone = &v
return s
}
// SetEndpoint sets the Endpoint field's value.
func (s *CacheNode) SetEndpoint(v *Endpoint) *CacheNode {
s.Endpoint = v
return s
}
// SetParameterGroupStatus sets the ParameterGroupStatus field's value.
func (s *CacheNode) SetParameterGroupStatus(v string) *CacheNode {
s.ParameterGroupStatus = &v
return s
}
// SetSourceCacheNodeId sets the SourceCacheNodeId field's value.
func (s *CacheNode) SetSourceCacheNodeId(v string) *CacheNode {
s.SourceCacheNodeId = &v
return s
}
// A parameter that has a different value for each cache node type it is applied
// to. For example, in a Redis cache cluster, a cache.m1.large cache node type
// would have a larger maxmemory value than a cache.m1.small type.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheNodeTypeSpecificParameter
type CacheNodeTypeSpecificParameter struct {
_ struct{} `type:"structure"`
// The valid range of values for the parameter.
AllowedValues *string `type:"string"`
// A list of cache node types and their corresponding values for this parameter.
CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"`
// Indicates whether a change to the parameter is applied immediately or requires
// a reboot for the change to be applied. You can force a reboot or wait until
// the next maintenance window's reboot. For more information, see Rebooting
// a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.html).
ChangeType *string `type:"string" enum:"ChangeType"`
// The valid data type for the parameter.
DataType *string `type:"string"`
// A description of the parameter.
Description *string `type:"string"`
// Indicates whether (true) or not (false) the parameter can be modified. Some
// parameters have security or operational implications that prevent them from
// being changed.
IsModifiable *bool `type:"boolean"`
// The earliest cache engine version to which the parameter can apply.
MinimumEngineVersion *string `type:"string"`
// The name of the parameter.
ParameterName *string `type:"string"`
// The source of the parameter value.
Source *string `type:"string"`
}
// String returns the string representation
func (s CacheNodeTypeSpecificParameter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheNodeTypeSpecificParameter) GoString() string {
return s.String()
}
// SetAllowedValues sets the AllowedValues field's value.
func (s *CacheNodeTypeSpecificParameter) SetAllowedValues(v string) *CacheNodeTypeSpecificParameter {
s.AllowedValues = &v
return s
}
// SetCacheNodeTypeSpecificValues sets the CacheNodeTypeSpecificValues field's value.
func (s *CacheNodeTypeSpecificParameter) SetCacheNodeTypeSpecificValues(v []*CacheNodeTypeSpecificValue) *CacheNodeTypeSpecificParameter {
s.CacheNodeTypeSpecificValues = v
return s
}
// SetChangeType sets the ChangeType field's value.
func (s *CacheNodeTypeSpecificParameter) SetChangeType(v string) *CacheNodeTypeSpecificParameter {
s.ChangeType = &v
return s
}
// SetDataType sets the DataType field's value.
func (s *CacheNodeTypeSpecificParameter) SetDataType(v string) *CacheNodeTypeSpecificParameter {
s.DataType = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CacheNodeTypeSpecificParameter) SetDescription(v string) *CacheNodeTypeSpecificParameter {
s.Description = &v
return s
}
// SetIsModifiable sets the IsModifiable field's value.
func (s *CacheNodeTypeSpecificParameter) SetIsModifiable(v bool) *CacheNodeTypeSpecificParameter {
s.IsModifiable = &v
return s
}
// SetMinimumEngineVersion sets the MinimumEngineVersion field's value.
func (s *CacheNodeTypeSpecificParameter) SetMinimumEngineVersion(v string) *CacheNodeTypeSpecificParameter {
s.MinimumEngineVersion = &v
return s
}
// SetParameterName sets the ParameterName field's value.
func (s *CacheNodeTypeSpecificParameter) SetParameterName(v string) *CacheNodeTypeSpecificParameter {
s.ParameterName = &v
return s
}
// SetSource sets the Source field's value.
func (s *CacheNodeTypeSpecificParameter) SetSource(v string) *CacheNodeTypeSpecificParameter {
s.Source = &v
return s
}
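// exampleNodeTypeValue is a minimal usage sketch, not part of the generated
// API: it scans a parameter's per-node-type value list for the entry that
// matches a given cache node type (e.g. "cache.m1.large"). The helper name
// is hypothetical.
func exampleNodeTypeValue(p *CacheNodeTypeSpecificParameter, nodeType string) (string, bool) {
	if p == nil {
		return "", false
	}
	for _, v := range p.CacheNodeTypeSpecificValues {
		if v == nil || v.CacheNodeType == nil || v.Value == nil {
			continue
		}
		if *v.CacheNodeType == nodeType {
			return *v.Value, true
		}
	}
	return "", false
}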
// A value that applies only to a certain cache node type.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheNodeTypeSpecificValue
type CacheNodeTypeSpecificValue struct {
_ struct{} `type:"structure"`
// The cache node type for which this value applies.
CacheNodeType *string `type:"string"`
// The value for the cache node type.
Value *string `type:"string"`
}
// String returns the string representation
func (s CacheNodeTypeSpecificValue) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheNodeTypeSpecificValue) GoString() string {
return s.String()
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *CacheNodeTypeSpecificValue) SetCacheNodeType(v string) *CacheNodeTypeSpecificValue {
s.CacheNodeType = &v
return s
}
// SetValue sets the Value field's value.
func (s *CacheNodeTypeSpecificValue) SetValue(v string) *CacheNodeTypeSpecificValue {
s.Value = &v
return s
}
// Represents the output of a CreateCacheParameterGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheParameterGroup
type CacheParameterGroup struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group family that this cache parameter group
// is compatible with.
//
// Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2
CacheParameterGroupFamily *string `type:"string"`
// The name of the cache parameter group.
CacheParameterGroupName *string `type:"string"`
// The description for this cache parameter group.
Description *string `type:"string"`
}
// String returns the string representation
func (s CacheParameterGroup) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheParameterGroup) GoString() string {
return s.String()
}
// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value.
func (s *CacheParameterGroup) SetCacheParameterGroupFamily(v string) *CacheParameterGroup {
s.CacheParameterGroupFamily = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *CacheParameterGroup) SetCacheParameterGroupName(v string) *CacheParameterGroup {
s.CacheParameterGroupName = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CacheParameterGroup) SetDescription(v string) *CacheParameterGroup {
s.Description = &v
return s
}
// Represents the output of one of the following operations:
//
// * ModifyCacheParameterGroup
//
// * ResetCacheParameterGroup
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheParameterGroupNameMessage
type CacheParameterGroupNameMessage struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group.
CacheParameterGroupName *string `type:"string"`
}
// String returns the string representation
func (s CacheParameterGroupNameMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheParameterGroupNameMessage) GoString() string {
return s.String()
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *CacheParameterGroupNameMessage) SetCacheParameterGroupName(v string) *CacheParameterGroupNameMessage {
s.CacheParameterGroupName = &v
return s
}
// Status of the cache parameter group.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheParameterGroupStatus
type CacheParameterGroupStatus struct {
_ struct{} `type:"structure"`
// A list of the cache node IDs which need to be rebooted for parameter changes
// to be applied. A node ID is a numeric identifier (0001, 0002, etc.).
CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list"`
// The name of the cache parameter group.
CacheParameterGroupName *string `type:"string"`
// The status of parameter updates.
ParameterApplyStatus *string `type:"string"`
}
// String returns the string representation
func (s CacheParameterGroupStatus) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheParameterGroupStatus) GoString() string {
return s.String()
}
// SetCacheNodeIdsToReboot sets the CacheNodeIdsToReboot field's value.
func (s *CacheParameterGroupStatus) SetCacheNodeIdsToReboot(v []*string) *CacheParameterGroupStatus {
s.CacheNodeIdsToReboot = v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *CacheParameterGroupStatus) SetCacheParameterGroupName(v string) *CacheParameterGroupStatus {
s.CacheParameterGroupName = &v
return s
}
// SetParameterApplyStatus sets the ParameterApplyStatus field's value.
func (s *CacheParameterGroupStatus) SetParameterApplyStatus(v string) *CacheParameterGroupStatus {
s.ParameterApplyStatus = &v
return s
}
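// exampleNeedsReboot is a minimal usage sketch, not part of the generated
// API: pending parameter changes require a reboot exactly when
// CacheNodeIdsToReboot is non-empty. The helper name is hypothetical.
func exampleNeedsReboot(s *CacheParameterGroupStatus) bool {
	return s != nil && len(s.CacheNodeIdsToReboot) > 0
}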
// Represents the output of one of the following operations:
//
// * AuthorizeCacheSecurityGroupIngress
//
// * CreateCacheSecurityGroup
//
// * RevokeCacheSecurityGroupIngress
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheSecurityGroup
type CacheSecurityGroup struct {
_ struct{} `type:"structure"`
// The name of the cache security group.
CacheSecurityGroupName *string `type:"string"`
// The description of the cache security group.
Description *string `type:"string"`
// A list of Amazon EC2 security groups that are associated with this cache
// security group.
EC2SecurityGroups []*EC2SecurityGroup `locationNameList:"EC2SecurityGroup" type:"list"`
// The AWS account ID of the cache security group owner.
OwnerId *string `type:"string"`
}
// String returns the string representation
func (s CacheSecurityGroup) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheSecurityGroup) GoString() string {
return s.String()
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *CacheSecurityGroup) SetCacheSecurityGroupName(v string) *CacheSecurityGroup {
s.CacheSecurityGroupName = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CacheSecurityGroup) SetDescription(v string) *CacheSecurityGroup {
s.Description = &v
return s
}
// SetEC2SecurityGroups sets the EC2SecurityGroups field's value.
func (s *CacheSecurityGroup) SetEC2SecurityGroups(v []*EC2SecurityGroup) *CacheSecurityGroup {
s.EC2SecurityGroups = v
return s
}
// SetOwnerId sets the OwnerId field's value.
func (s *CacheSecurityGroup) SetOwnerId(v string) *CacheSecurityGroup {
s.OwnerId = &v
return s
}
// Represents a cache cluster's status within a particular cache security group.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheSecurityGroupMembership
type CacheSecurityGroupMembership struct {
_ struct{} `type:"structure"`
// The name of the cache security group.
CacheSecurityGroupName *string `type:"string"`
// The membership status in the cache security group. The status changes when
// a cache security group is modified, or when the cache security groups assigned
// to a cache cluster are modified.
Status *string `type:"string"`
}
// String returns the string representation
func (s CacheSecurityGroupMembership) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheSecurityGroupMembership) GoString() string {
return s.String()
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *CacheSecurityGroupMembership) SetCacheSecurityGroupName(v string) *CacheSecurityGroupMembership {
s.CacheSecurityGroupName = &v
return s
}
// SetStatus sets the Status field's value.
func (s *CacheSecurityGroupMembership) SetStatus(v string) *CacheSecurityGroupMembership {
s.Status = &v
return s
}
// Represents the output of one of the following operations:
//
// * CreateCacheSubnetGroup
//
// * ModifyCacheSubnetGroup
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheSubnetGroup
type CacheSubnetGroup struct {
_ struct{} `type:"structure"`
// The description of the cache subnet group.
CacheSubnetGroupDescription *string `type:"string"`
// The name of the cache subnet group.
CacheSubnetGroupName *string `type:"string"`
// A list of subnets associated with the cache subnet group.
Subnets []*Subnet `locationNameList:"Subnet" type:"list"`
// The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet
// group.
VpcId *string `type:"string"`
}
// String returns the string representation
func (s CacheSubnetGroup) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CacheSubnetGroup) GoString() string {
return s.String()
}
// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value.
func (s *CacheSubnetGroup) SetCacheSubnetGroupDescription(v string) *CacheSubnetGroup {
s.CacheSubnetGroupDescription = &v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *CacheSubnetGroup) SetCacheSubnetGroupName(v string) *CacheSubnetGroup {
s.CacheSubnetGroupName = &v
return s
}
// SetSubnets sets the Subnets field's value.
func (s *CacheSubnetGroup) SetSubnets(v []*Subnet) *CacheSubnetGroup {
s.Subnets = v
return s
}
// SetVpcId sets the VpcId field's value.
func (s *CacheSubnetGroup) SetVpcId(v string) *CacheSubnetGroup {
s.VpcId = &v
return s
}
// Represents the input of a CopySnapshot operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CopySnapshotMessage
type CopySnapshotInput struct {
_ struct{} `type:"structure"`
// The name of an existing snapshot from which to make a copy.
//
// SourceSnapshotName is a required field
SourceSnapshotName *string `type:"string" required:"true"`
// The Amazon S3 bucket to which the snapshot is exported. This parameter is
// used only when exporting a snapshot for external access.
//
// When using this parameter to export a snapshot, be sure Amazon ElastiCache
// has the needed permissions to this S3 bucket. For more information, see Step
// 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess)
// in the Amazon ElastiCache User Guide.
//
// For more information, see Exporting a Snapshot (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html)
// in the Amazon ElastiCache User Guide.
TargetBucket *string `type:"string"`
// A name for the snapshot copy. ElastiCache does not permit overwriting a
// snapshot, so this name must be unique within its context: ElastiCache or,
// when exporting, an Amazon S3 bucket.
//
// TargetSnapshotName is a required field
TargetSnapshotName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CopySnapshotInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CopySnapshotInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CopySnapshotInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CopySnapshotInput"}
if s.SourceSnapshotName == nil {
invalidParams.Add(request.NewErrParamRequired("SourceSnapshotName"))
}
if s.TargetSnapshotName == nil {
invalidParams.Add(request.NewErrParamRequired("TargetSnapshotName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetSourceSnapshotName sets the SourceSnapshotName field's value.
func (s *CopySnapshotInput) SetSourceSnapshotName(v string) *CopySnapshotInput {
s.SourceSnapshotName = &v
return s
}
// SetTargetBucket sets the TargetBucket field's value.
func (s *CopySnapshotInput) SetTargetBucket(v string) *CopySnapshotInput {
s.TargetBucket = &v
return s
}
// SetTargetSnapshotName sets the TargetSnapshotName field's value.
func (s *CopySnapshotInput) SetTargetSnapshotName(v string) *CopySnapshotInput {
s.TargetSnapshotName = &v
return s
}
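// exampleCopySnapshotInput is a minimal usage sketch, not part of the
// generated API: it builds a CopySnapshotInput with the fluent setters and
// checks the two required fields via Validate. The snapshot and bucket
// names are placeholders.
func exampleCopySnapshotInput() (*CopySnapshotInput, error) {
	input := &CopySnapshotInput{}
	input.SetSourceSnapshotName("my-source-snapshot")
	input.SetTargetSnapshotName("my-snapshot-copy")
	// TargetBucket is only needed when exporting the copy to Amazon S3.
	input.SetTargetBucket("my-export-bucket")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}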
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CopySnapshotResult
type CopySnapshotOutput struct {
_ struct{} `type:"structure"`
// Represents a copy of an entire Redis cache cluster as of the time when the
// snapshot was taken.
Snapshot *Snapshot `type:"structure"`
}
// String returns the string representation
func (s CopySnapshotOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CopySnapshotOutput) GoString() string {
return s.String()
}
// SetSnapshot sets the Snapshot field's value.
func (s *CopySnapshotOutput) SetSnapshot(v *Snapshot) *CopySnapshotOutput {
s.Snapshot = v
return s
}
// Represents the input of a CreateCacheCluster operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheClusterMessage
type CreateCacheClusterInput struct {
_ struct{} `type:"structure"`
// Specifies whether the nodes in this Memcached cluster are created in a single
// Availability Zone or created across multiple Availability Zones in the cluster's
// region.
//
// This parameter is only supported for Memcached cache clusters.
//
// If the AZMode and PreferredAvailabilityZones are not specified, ElastiCache
// assumes single-az mode.
AZMode *string `type:"string" enum:"AZMode"`
// Reserved parameter. The password used to access a password protected server.
//
// Password constraints:
//
// * Must be only printable ASCII characters.
//
// * Must be at least 16 characters and no more than 128 characters in length.
//
// * Cannot contain any of the following characters: '/', '"', or "@".
//
// For more information, see AUTH password (http://redis.io/commands/AUTH) at
// Redis.
AuthToken *string `type:"string"`
// This parameter is currently disabled.
AutoMinorVersionUpgrade *bool `type:"boolean"`
// The node group (shard) identifier. This parameter is stored as a lowercase
// string.
//
// Constraints:
//
// * A name must contain from 1 to 20 alphanumeric characters or hyphens.
//
// * The first character must be a letter.
//
// * A name cannot end with a hyphen or contain two consecutive hyphens.
//
// CacheClusterId is a required field
CacheClusterId *string `type:"string" required:"true"`
// The compute and memory capacity of the nodes in the node group (shard).
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// The name of the parameter group to associate with this cache cluster. If
// this argument is omitted, the default parameter group for the specified engine
// is used. You cannot use any parameter group which has cluster-enabled='yes'
// when creating a cluster.
CacheParameterGroupName *string `type:"string"`
// A list of security group names to associate with this cache cluster.
//
// Use this parameter only when you are creating a cache cluster outside of
// an Amazon Virtual Private Cloud (Amazon VPC).
CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
// The name of the subnet group to be used for the cache cluster.
//
// Use this parameter only when you are creating a cache cluster in an Amazon
// Virtual Private Cloud (Amazon VPC).
//
// If you're going to launch your cluster in an Amazon VPC, you need to create
// a subnet group before you start creating a cluster. For more information,
// see Subnets and Subnet Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SubnetGroups.html).
CacheSubnetGroupName *string `type:"string"`
// The name of the cache engine to be used for this cache cluster.
//
// Valid values for this parameter are: memcached | redis
Engine *string `type:"string"`
// The version number of the cache engine to be used for this cache cluster.
// To view the supported cache engine versions, use the DescribeCacheEngineVersions
// operation.
//
// Important: You can upgrade to a newer engine version (see Selecting a Cache
// Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)),
// but you cannot downgrade to an earlier engine version. If you want to use
// an earlier engine version, you must delete the existing cache cluster or
// replication group and create it anew with the earlier engine version.
EngineVersion *string `type:"string"`
// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
// (SNS) topic to which notifications are sent.
//
// The Amazon SNS topic owner must be the same as the cache cluster owner.
NotificationTopicArn *string `type:"string"`
// The initial number of cache nodes that the cache cluster has.
//
// For clusters running Redis, this value must be 1. For clusters running Memcached,
// this value must be between 1 and 20.
//
// If you need more than 20 nodes for your Memcached cluster, please fill out
// the ElastiCache Limit Increase Request form at
// http://aws.amazon.com/contact-us/elasticache-node-limit-request/.
NumCacheNodes *int64 `type:"integer"`
// The port number on which each of the cache nodes accepts connections.
Port *int64 `type:"integer"`
// The EC2 Availability Zone in which the cache cluster is created.
//
// All nodes belonging to this Memcached cache cluster are placed in the preferred
// Availability Zone. If you want to create your nodes across multiple Availability
// Zones, use PreferredAvailabilityZones.
//
// Default: System chosen Availability Zone.
PreferredAvailabilityZone *string `type:"string"`
// A list of the Availability Zones in which cache nodes are created. The order
// of the zones in the list is not important.
//
// This option is only supported on Memcached.
//
// If you are creating your cache cluster in an Amazon VPC (recommended) you
// can only locate nodes in Availability Zones that are associated with the
// subnets in the selected subnet group.
//
// The number of Availability Zones listed must equal the value of NumCacheNodes.
//
// If you want all the nodes in the same Availability Zone, use PreferredAvailabilityZone
// instead, or repeat the Availability Zone multiple times in the list.
//
// Default: System chosen Availability Zones.
PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"`
// Specifies the weekly time range during which maintenance on the cache cluster
// is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
//
// Valid values for ddd are:
//
// * sun
//
// * mon
//
// * tue
//
// * wed
//
// * thu
//
// * fri
//
// * sat
//
// Example: sun:23:00-mon:01:30
PreferredMaintenanceWindow *string `type:"string"`
// Due to current limitations on Redis (cluster mode disabled), this operation
// or parameter is not supported on Redis (cluster mode enabled) replication
// groups.
//
// The ID of the replication group to which this cache cluster should belong.
// If this parameter is specified, the cache cluster is added to the specified
// replication group as a read replica; otherwise, the cache cluster is a standalone
// primary that is not part of any replication group.
//
// If the specified replication group is Multi-AZ enabled and the Availability
// Zone is not specified, the cache cluster is created in Availability Zones
// that provide the best spread of read replicas across Availability Zones.
//
// This parameter is only valid if the Engine parameter is redis.
ReplicationGroupId *string `type:"string"`
// One or more VPC security groups associated with the cache cluster.
//
// Use this parameter only when you are creating a cache cluster in an Amazon
// Virtual Private Cloud (Amazon VPC).
SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
// A single-element string list containing an Amazon Resource Name (ARN) that
// uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
// file is used to populate the node group (shard). The Amazon S3 object name
// in the ARN cannot contain any commas.
//
// This parameter is only valid if the Engine parameter is redis.
//
// Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"`
// The name of a Redis snapshot from which to restore data into the new node
// group (shard). The snapshot status changes to restoring while the new node
// group (shard) is being created.
//
// This parameter is only valid if the Engine parameter is redis.
SnapshotName *string `type:"string"`
// The number of days for which ElastiCache retains automatic snapshots before
// deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot
// taken today is retained for 5 days before being deleted.
//
// This parameter is only valid if the Engine parameter is redis.
//
// Default: 0 (i.e., automatic backups are disabled for this cache cluster).
SnapshotRetentionLimit *int64 `type:"integer"`
// The daily time range (in UTC) during which ElastiCache begins taking a daily
// snapshot of your node group (shard).
//
// Example: 05:00-09:00
//
// If you do not specify this parameter, ElastiCache automatically chooses an
// appropriate time range.
//
// Note: This parameter is only valid if the Engine parameter is redis.
SnapshotWindow *string `type:"string"`
// A list of cost allocation tags to be added to this resource. A tag is a key-value
// pair. A tag key must be accompanied by a tag value.
Tags []*Tag `locationNameList:"Tag" type:"list"`
}
// String returns the string representation
func (s CreateCacheClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateCacheClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateCacheClusterInput"}
if s.CacheClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAZMode sets the AZMode field's value.
func (s *CreateCacheClusterInput) SetAZMode(v string) *CreateCacheClusterInput {
s.AZMode = &v
return s
}
// SetAuthToken sets the AuthToken field's value.
func (s *CreateCacheClusterInput) SetAuthToken(v string) *CreateCacheClusterInput {
s.AuthToken = &v
return s
}
// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value.
func (s *CreateCacheClusterInput) SetAutoMinorVersionUpgrade(v bool) *CreateCacheClusterInput {
s.AutoMinorVersionUpgrade = &v
return s
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *CreateCacheClusterInput) SetCacheClusterId(v string) *CreateCacheClusterInput {
s.CacheClusterId = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *CreateCacheClusterInput) SetCacheNodeType(v string) *CreateCacheClusterInput {
s.CacheNodeType = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *CreateCacheClusterInput) SetCacheParameterGroupName(v string) *CreateCacheClusterInput {
s.CacheParameterGroupName = &v
return s
}
// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value.
func (s *CreateCacheClusterInput) SetCacheSecurityGroupNames(v []*string) *CreateCacheClusterInput {
s.CacheSecurityGroupNames = v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *CreateCacheClusterInput) SetCacheSubnetGroupName(v string) *CreateCacheClusterInput {
s.CacheSubnetGroupName = &v
return s
}
// SetEngine sets the Engine field's value.
func (s *CreateCacheClusterInput) SetEngine(v string) *CreateCacheClusterInput {
s.Engine = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *CreateCacheClusterInput) SetEngineVersion(v string) *CreateCacheClusterInput {
s.EngineVersion = &v
return s
}
// SetNotificationTopicArn sets the NotificationTopicArn field's value.
func (s *CreateCacheClusterInput) SetNotificationTopicArn(v string) *CreateCacheClusterInput {
s.NotificationTopicArn = &v
return s
}
// SetNumCacheNodes sets the NumCacheNodes field's value.
func (s *CreateCacheClusterInput) SetNumCacheNodes(v int64) *CreateCacheClusterInput {
s.NumCacheNodes = &v
return s
}
// SetPort sets the Port field's value.
func (s *CreateCacheClusterInput) SetPort(v int64) *CreateCacheClusterInput {
s.Port = &v
return s
}
// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value.
func (s *CreateCacheClusterInput) SetPreferredAvailabilityZone(v string) *CreateCacheClusterInput {
s.PreferredAvailabilityZone = &v
return s
}
// SetPreferredAvailabilityZones sets the PreferredAvailabilityZones field's value.
func (s *CreateCacheClusterInput) SetPreferredAvailabilityZones(v []*string) *CreateCacheClusterInput {
s.PreferredAvailabilityZones = v
return s
}
// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
func (s *CreateCacheClusterInput) SetPreferredMaintenanceWindow(v string) *CreateCacheClusterInput {
s.PreferredMaintenanceWindow = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *CreateCacheClusterInput) SetReplicationGroupId(v string) *CreateCacheClusterInput {
s.ReplicationGroupId = &v
return s
}
// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *CreateCacheClusterInput) SetSecurityGroupIds(v []*string) *CreateCacheClusterInput {
s.SecurityGroupIds = v
return s
}
// SetSnapshotArns sets the SnapshotArns field's value.
func (s *CreateCacheClusterInput) SetSnapshotArns(v []*string) *CreateCacheClusterInput {
s.SnapshotArns = v
return s
}
// SetSnapshotName sets the SnapshotName field's value.
func (s *CreateCacheClusterInput) SetSnapshotName(v string) *CreateCacheClusterInput {
s.SnapshotName = &v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *CreateCacheClusterInput) SetSnapshotRetentionLimit(v int64) *CreateCacheClusterInput {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *CreateCacheClusterInput) SetSnapshotWindow(v string) *CreateCacheClusterInput {
s.SnapshotWindow = &v
return s
}
// SetTags sets the Tags field's value.
func (s *CreateCacheClusterInput) SetTags(v []*Tag) *CreateCacheClusterInput {
s.Tags = v
return s
}
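// exampleCreateCacheClusterInput is a minimal usage sketch, not part of the
// generated API: it assembles a single-node Redis cluster request with the
// fluent setters, then relies on Validate to enforce the required
// CacheClusterId. All identifiers are placeholders.
func exampleCreateCacheClusterInput() (*CreateCacheClusterInput, error) {
	input := &CreateCacheClusterInput{}
	input.SetCacheClusterId("my-redis-cluster")
	input.SetEngine("redis")
	input.SetCacheNodeType("cache.t2.micro")
	input.SetNumCacheNodes(1) // must be 1 for clusters running Redis
	input.SetPort(6379)
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}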
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheClusterResult
type CreateCacheClusterOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific cache cluster.
CacheCluster *CacheCluster `type:"structure"`
}
// String returns the string representation
func (s CreateCacheClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheClusterOutput) GoString() string {
return s.String()
}
// SetCacheCluster sets the CacheCluster field's value.
func (s *CreateCacheClusterOutput) SetCacheCluster(v *CacheCluster) *CreateCacheClusterOutput {
s.CacheCluster = v
return s
}
// Represents the input of a CreateCacheParameterGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheParameterGroupMessage
type CreateCacheParameterGroupInput struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group family that the cache parameter group
// can be used with.
//
// Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2
//
// CacheParameterGroupFamily is a required field
CacheParameterGroupFamily *string `type:"string" required:"true"`
// A user-specified name for the cache parameter group.
//
// CacheParameterGroupName is a required field
CacheParameterGroupName *string `type:"string" required:"true"`
// A user-specified description for the cache parameter group.
//
// Description is a required field
Description *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateCacheParameterGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheParameterGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateCacheParameterGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateCacheParameterGroupInput"}
if s.CacheParameterGroupFamily == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily"))
}
if s.CacheParameterGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
}
if s.Description == nil {
invalidParams.Add(request.NewErrParamRequired("Description"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value.
func (s *CreateCacheParameterGroupInput) SetCacheParameterGroupFamily(v string) *CreateCacheParameterGroupInput {
s.CacheParameterGroupFamily = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *CreateCacheParameterGroupInput) SetCacheParameterGroupName(v string) *CreateCacheParameterGroupInput {
s.CacheParameterGroupName = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateCacheParameterGroupInput) SetDescription(v string) *CreateCacheParameterGroupInput {
s.Description = &v
return s
}
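// exampleCreateCacheParameterGroupInput is a minimal usage sketch, not part
// of the generated API: all three fields of this input are required, so
// Validate fails unless each setter below is called. Names are placeholders.
func exampleCreateCacheParameterGroupInput() (*CreateCacheParameterGroupInput, error) {
	input := &CreateCacheParameterGroupInput{}
	input.SetCacheParameterGroupFamily("redis3.2")
	input.SetCacheParameterGroupName("my-param-group")
	input.SetDescription("custom Redis 3.2 parameters")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}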
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheParameterGroupResult
type CreateCacheParameterGroupOutput struct {
_ struct{} `type:"structure"`
// Represents the output of a CreateCacheParameterGroup operation.
CacheParameterGroup *CacheParameterGroup `type:"structure"`
}
// String returns the string representation
func (s CreateCacheParameterGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheParameterGroupOutput) GoString() string {
return s.String()
}
// SetCacheParameterGroup sets the CacheParameterGroup field's value.
func (s *CreateCacheParameterGroupOutput) SetCacheParameterGroup(v *CacheParameterGroup) *CreateCacheParameterGroupOutput {
s.CacheParameterGroup = v
return s
}
// Represents the input of a CreateCacheSecurityGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSecurityGroupMessage
type CreateCacheSecurityGroupInput struct {
_ struct{} `type:"structure"`
// A name for the cache security group. This value is stored as a lowercase
// string.
//
// Constraints: Must contain no more than 255 alphanumeric characters. Cannot
// be the word "Default".
//
// Example: mysecuritygroup
//
// CacheSecurityGroupName is a required field
CacheSecurityGroupName *string `type:"string" required:"true"`
// A description for the cache security group.
//
// Description is a required field
Description *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateCacheSecurityGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheSecurityGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateCacheSecurityGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateCacheSecurityGroupInput"}
if s.CacheSecurityGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName"))
}
if s.Description == nil {
invalidParams.Add(request.NewErrParamRequired("Description"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *CreateCacheSecurityGroupInput) SetCacheSecurityGroupName(v string) *CreateCacheSecurityGroupInput {
s.CacheSecurityGroupName = &v
return s
}
// SetDescription sets the Description field's value.
func (s *CreateCacheSecurityGroupInput) SetDescription(v string) *CreateCacheSecurityGroupInput {
s.Description = &v
return s
}
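// exampleCreateCacheSecurityGroupInput is a minimal usage sketch, not part
// of the generated API: both fields are required, and the group name may
// not be the word "Default". Names are placeholders.
func exampleCreateCacheSecurityGroupInput() (*CreateCacheSecurityGroupInput, error) {
	input := &CreateCacheSecurityGroupInput{}
	input.SetCacheSecurityGroupName("mysecuritygroup")
	input.SetDescription("security group for the cache fleet")
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}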
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSecurityGroupResult
type CreateCacheSecurityGroupOutput struct {
_ struct{} `type:"structure"`
// Represents the output of one of the following operations:
//
// * AuthorizeCacheSecurityGroupIngress
//
// * CreateCacheSecurityGroup
//
// * RevokeCacheSecurityGroupIngress
CacheSecurityGroup *CacheSecurityGroup `type:"structure"`
}
// String returns the string representation
func (s CreateCacheSecurityGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheSecurityGroupOutput) GoString() string {
return s.String()
}
// SetCacheSecurityGroup sets the CacheSecurityGroup field's value.
func (s *CreateCacheSecurityGroupOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *CreateCacheSecurityGroupOutput {
s.CacheSecurityGroup = v
return s
}
// Represents the input of a CreateCacheSubnetGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSubnetGroupMessage
type CreateCacheSubnetGroupInput struct {
_ struct{} `type:"structure"`
// A description for the cache subnet group.
//
// CacheSubnetGroupDescription is a required field
CacheSubnetGroupDescription *string `type:"string" required:"true"`
// A name for the cache subnet group. This value is stored as a lowercase string.
//
// Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
//
// Example: mysubnetgroup
//
// CacheSubnetGroupName is a required field
CacheSubnetGroupName *string `type:"string" required:"true"`
// A list of VPC subnet IDs for the cache subnet group.
//
// SubnetIds is a required field
SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list" required:"true"`
}
// String returns the string representation
func (s CreateCacheSubnetGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheSubnetGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateCacheSubnetGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateCacheSubnetGroupInput"}
if s.CacheSubnetGroupDescription == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupDescription"))
}
if s.CacheSubnetGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName"))
}
if s.SubnetIds == nil {
invalidParams.Add(request.NewErrParamRequired("SubnetIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value.
func (s *CreateCacheSubnetGroupInput) SetCacheSubnetGroupDescription(v string) *CreateCacheSubnetGroupInput {
s.CacheSubnetGroupDescription = &v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *CreateCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *CreateCacheSubnetGroupInput {
s.CacheSubnetGroupName = &v
return s
}
// SetSubnetIds sets the SubnetIds field's value.
func (s *CreateCacheSubnetGroupInput) SetSubnetIds(v []*string) *CreateCacheSubnetGroupInput {
s.SubnetIds = v
return s
}
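// exampleCreateCacheSubnetGroupInput is a minimal usage sketch, not part of
// the generated API: SubnetIds is a list of *string, so literal IDs must be
// wrapped as shown. The subnet IDs below are placeholders.
func exampleCreateCacheSubnetGroupInput() (*CreateCacheSubnetGroupInput, error) {
	subnetA, subnetB := "subnet-0aaa0aaa", "subnet-0bbb0bbb"
	input := &CreateCacheSubnetGroupInput{}
	input.SetCacheSubnetGroupName("mysubnetgroup")
	input.SetCacheSubnetGroupDescription("subnets for the cache VPC")
	input.SetSubnetIds([]*string{&subnetA, &subnetB})
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}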
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateCacheSubnetGroupResult
type CreateCacheSubnetGroupOutput struct {
_ struct{} `type:"structure"`
// Represents the output of one of the following operations:
//
// * CreateCacheSubnetGroup
//
// * ModifyCacheSubnetGroup
CacheSubnetGroup *CacheSubnetGroup `type:"structure"`
}
// String returns the string representation
func (s CreateCacheSubnetGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateCacheSubnetGroupOutput) GoString() string {
return s.String()
}
// SetCacheSubnetGroup sets the CacheSubnetGroup field's value.
func (s *CreateCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) *CreateCacheSubnetGroupOutput {
s.CacheSubnetGroup = v
return s
}
// Represents the input of a CreateReplicationGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateReplicationGroupMessage
type CreateReplicationGroupInput struct {
_ struct{} `type:"structure"`
// Reserved parameter. The password used to access a password protected server.
//
// Password constraints:
//
// * Must be only printable ASCII characters.
//
// * Must be at least 16 characters and no more than 128 characters in length.
//
// * Cannot contain any of the following characters: '/', '"', or "@".
//
// For more information, see AUTH password (http://redis.io/commands/AUTH) at
// Redis.
AuthToken *string `type:"string"`
// This parameter is currently disabled.
AutoMinorVersionUpgrade *bool `type:"boolean"`
// Specifies whether a read-only replica is automatically promoted to read/write
// primary if the existing primary fails.
//
// If true, Multi-AZ is enabled for this replication group. If false, Multi-AZ
// is disabled for this replication group.
//
// AutomaticFailoverEnabled must be enabled for Redis (cluster mode enabled)
// replication groups.
//
// Default: false
//
// ElastiCache Multi-AZ replication groups are not supported on:
//
// Redis versions earlier than 2.8.6.
//
// Redis (cluster mode disabled): T1 and T2 node types.
//
// Redis (cluster mode enabled): T2 node types.
AutomaticFailoverEnabled *bool `type:"boolean"`
// The compute and memory capacity of the nodes in the node group (shard).
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// The name of the parameter group to associate with this replication group.
// If this argument is omitted, the default cache parameter group for the specified
// engine is used.
//
// If you are running Redis version 3.2.4 or later, have only one node group
// (shard), and want to use a default parameter group, we recommend that you
// specify the parameter group by name.
//
// * To create a Redis (cluster mode disabled) replication group, use CacheParameterGroupName=default.redis3.2.
//
// * To create a Redis (cluster mode enabled) replication group, use CacheParameterGroupName=default.redis3.2.cluster.on.
CacheParameterGroupName *string `type:"string"`
// A list of cache security group names to associate with this replication group.
CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
// The name of the cache subnet group to be used for the replication group.
//
// If you're going to launch your cluster in an Amazon VPC, you need to create
// a subnet group before you start creating a cluster. For more information,
// see Subnets and Subnet Groups (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SubnetGroups.html).
CacheSubnetGroupName *string `type:"string"`
// The name of the cache engine to be used for the cache clusters in this replication
// group.
Engine *string `type:"string"`
// The version number of the cache engine to be used for the cache clusters
// in this replication group. To view the supported cache engine versions, use
// the DescribeCacheEngineVersions operation.
//
// Important: You can upgrade to a newer engine version (see Selecting a Cache
// Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement))
// in the ElastiCache User Guide, but you cannot downgrade to an earlier engine
// version. If you want to use an earlier engine version, you must delete the
// existing cache cluster or replication group and create it anew with the earlier
// engine version.
EngineVersion *string `type:"string"`
// A list of node group (shard) configuration options. Each node group (shard)
// configuration has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones,
// ReplicaCount.
//
// If you're creating a Redis (cluster mode disabled) or a Redis (cluster mode
// enabled) replication group, you can use this parameter to individually configure
// each node group (shard), or you can omit this parameter.
NodeGroupConfiguration []*NodeGroupConfiguration `locationNameList:"NodeGroupConfiguration" type:"list"`
// The Amazon Resource Name (ARN) of the Amazon Simple Notification Service
// (SNS) topic to which notifications are sent.
//
// The Amazon SNS topic owner must be the same as the cache cluster owner.
NotificationTopicArn *string `type:"string"`
// The number of clusters this replication group initially has.
//
// This parameter is not used if there is more than one node group (shard).
// You should use ReplicasPerNodeGroup instead.
//
// If AutomaticFailoverEnabled is true, the value of this parameter must be
// at least 2. If AutomaticFailoverEnabled is false, you can omit this parameter
// (it will default to 1), or you can explicitly set it to a value between 2
// and 6.
//
// The maximum permitted value for NumCacheClusters is 6 (primary plus 5 replicas).
NumCacheClusters *int64 `type:"integer"`
// An optional parameter that specifies the number of node groups (shards) for
// this Redis (cluster mode enabled) replication group. For Redis (cluster mode
// disabled) either omit this parameter or set it to 1.
//
// Default: 1
NumNodeGroups *int64 `type:"integer"`
// The port number on which each member of the replication group accepts connections.
Port *int64 `type:"integer"`
// A list of EC2 Availability Zones in which the replication group's cache clusters
// are created. The order of the Availability Zones in the list is the order
// in which clusters are allocated. The primary cluster is created in the first
// AZ in the list.
//
// This parameter is not used if there is more than one node group (shard).
// You should use NodeGroupConfiguration instead.
//
// If you are creating your replication group in an Amazon VPC (recommended),
// you can only locate cache clusters in Availability Zones associated with
// the subnets in the selected subnet group.
//
// The number of Availability Zones listed must equal the value of NumCacheClusters.
//
// Default: system chosen Availability Zones.
PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"`
// Specifies the weekly time range during which maintenance on the cache cluster
// is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
//
// Valid values for ddd are:
//
// * sun
//
// * mon
//
// * tue
//
// * wed
//
// * thu
//
// * fri
//
// * sat
//
// Example: sun:23:00-mon:01:30
PreferredMaintenanceWindow *string `type:"string"`
// The identifier of the cache cluster that serves as the primary for this replication
// group. This cache cluster must already exist and have a status of available.
//
// This parameter is not required if NumCacheClusters, NumNodeGroups, or ReplicasPerNodeGroup
// is specified.
PrimaryClusterId *string `type:"string"`
// An optional parameter that specifies the number of replica nodes in each
// node group (shard). Valid values are 0 to 5.
ReplicasPerNodeGroup *int64 `type:"integer"`
// A user-created description for the replication group.
//
// ReplicationGroupDescription is a required field
ReplicationGroupDescription *string `type:"string" required:"true"`
// The replication group identifier. This parameter is stored as a lowercase
// string.
//
// Constraints:
//
// * A name must contain from 1 to 20 alphanumeric characters or hyphens.
//
// * The first character must be a letter.
//
// * A name cannot end with a hyphen or contain two consecutive hyphens.
//
// ReplicationGroupId is a required field
ReplicationGroupId *string `type:"string" required:"true"`
// One or more Amazon VPC security groups associated with this replication group.
//
// Use this parameter only when you are creating a replication group in an Amazon
// Virtual Private Cloud (Amazon VPC).
SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
// A list of Amazon Resource Names (ARN) that uniquely identify the Redis RDB
// snapshot files stored in Amazon S3. The snapshot files are used to populate
// the new replication group. The Amazon S3 object name in the ARN cannot contain
// any commas. The new replication group will have the number of node groups
// (console: shards) specified by the parameter NumNodeGroups or the number
// of node groups configured by NodeGroupConfiguration regardless of the number
// of ARNs specified here.
//
// This parameter is only valid if the Engine parameter is redis.
//
// Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb
SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"`
// The name of a snapshot from which to restore data into the new replication
// group. The snapshot status changes to restoring while the new replication
// group is being created.
//
// This parameter is only valid if the Engine parameter is redis.
SnapshotName *string `type:"string"`
// The number of days for which ElastiCache retains automatic snapshots before
// deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot
// that was taken today is retained for 5 days before being deleted.
//
// This parameter is only valid if the Engine parameter is redis.
//
// Default: 0 (i.e., automatic backups are disabled for this cache cluster).
SnapshotRetentionLimit *int64 `type:"integer"`
// The daily time range (in UTC) during which ElastiCache begins taking a daily
// snapshot of your node group (shard).
//
// Example: 05:00-09:00
//
// If you do not specify this parameter, ElastiCache automatically chooses an
// appropriate time range.
//
// This parameter is only valid if the Engine parameter is redis.
SnapshotWindow *string `type:"string"`
// A list of cost allocation tags to be added to this resource. A tag is a key-value
// pair. A tag key must be accompanied by a tag value.
Tags []*Tag `locationNameList:"Tag" type:"list"`
}
// String returns the string representation
func (s CreateReplicationGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateReplicationGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateReplicationGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupInput"}
if s.ReplicationGroupDescription == nil {
invalidParams.Add(request.NewErrParamRequired("ReplicationGroupDescription"))
}
if s.ReplicationGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAuthToken sets the AuthToken field's value.
func (s *CreateReplicationGroupInput) SetAuthToken(v string) *CreateReplicationGroupInput {
s.AuthToken = &v
return s
}
// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value.
func (s *CreateReplicationGroupInput) SetAutoMinorVersionUpgrade(v bool) *CreateReplicationGroupInput {
s.AutoMinorVersionUpgrade = &v
return s
}
// SetAutomaticFailoverEnabled sets the AutomaticFailoverEnabled field's value.
func (s *CreateReplicationGroupInput) SetAutomaticFailoverEnabled(v bool) *CreateReplicationGroupInput {
s.AutomaticFailoverEnabled = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *CreateReplicationGroupInput) SetCacheNodeType(v string) *CreateReplicationGroupInput {
s.CacheNodeType = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *CreateReplicationGroupInput) SetCacheParameterGroupName(v string) *CreateReplicationGroupInput {
s.CacheParameterGroupName = &v
return s
}
// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value.
func (s *CreateReplicationGroupInput) SetCacheSecurityGroupNames(v []*string) *CreateReplicationGroupInput {
s.CacheSecurityGroupNames = v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *CreateReplicationGroupInput) SetCacheSubnetGroupName(v string) *CreateReplicationGroupInput {
s.CacheSubnetGroupName = &v
return s
}
// SetEngine sets the Engine field's value.
func (s *CreateReplicationGroupInput) SetEngine(v string) *CreateReplicationGroupInput {
s.Engine = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *CreateReplicationGroupInput) SetEngineVersion(v string) *CreateReplicationGroupInput {
s.EngineVersion = &v
return s
}
// SetNodeGroupConfiguration sets the NodeGroupConfiguration field's value.
func (s *CreateReplicationGroupInput) SetNodeGroupConfiguration(v []*NodeGroupConfiguration) *CreateReplicationGroupInput {
s.NodeGroupConfiguration = v
return s
}
// SetNotificationTopicArn sets the NotificationTopicArn field's value.
func (s *CreateReplicationGroupInput) SetNotificationTopicArn(v string) *CreateReplicationGroupInput {
s.NotificationTopicArn = &v
return s
}
// SetNumCacheClusters sets the NumCacheClusters field's value.
func (s *CreateReplicationGroupInput) SetNumCacheClusters(v int64) *CreateReplicationGroupInput {
s.NumCacheClusters = &v
return s
}
// SetNumNodeGroups sets the NumNodeGroups field's value.
func (s *CreateReplicationGroupInput) SetNumNodeGroups(v int64) *CreateReplicationGroupInput {
s.NumNodeGroups = &v
return s
}
// SetPort sets the Port field's value.
func (s *CreateReplicationGroupInput) SetPort(v int64) *CreateReplicationGroupInput {
s.Port = &v
return s
}
// SetPreferredCacheClusterAZs sets the PreferredCacheClusterAZs field's value.
func (s *CreateReplicationGroupInput) SetPreferredCacheClusterAZs(v []*string) *CreateReplicationGroupInput {
s.PreferredCacheClusterAZs = v
return s
}
// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
func (s *CreateReplicationGroupInput) SetPreferredMaintenanceWindow(v string) *CreateReplicationGroupInput {
s.PreferredMaintenanceWindow = &v
return s
}
// SetPrimaryClusterId sets the PrimaryClusterId field's value.
func (s *CreateReplicationGroupInput) SetPrimaryClusterId(v string) *CreateReplicationGroupInput {
s.PrimaryClusterId = &v
return s
}
// SetReplicasPerNodeGroup sets the ReplicasPerNodeGroup field's value.
func (s *CreateReplicationGroupInput) SetReplicasPerNodeGroup(v int64) *CreateReplicationGroupInput {
s.ReplicasPerNodeGroup = &v
return s
}
// SetReplicationGroupDescription sets the ReplicationGroupDescription field's value.
func (s *CreateReplicationGroupInput) SetReplicationGroupDescription(v string) *CreateReplicationGroupInput {
s.ReplicationGroupDescription = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *CreateReplicationGroupInput) SetReplicationGroupId(v string) *CreateReplicationGroupInput {
s.ReplicationGroupId = &v
return s
}
// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *CreateReplicationGroupInput) SetSecurityGroupIds(v []*string) *CreateReplicationGroupInput {
s.SecurityGroupIds = v
return s
}
// SetSnapshotArns sets the SnapshotArns field's value.
func (s *CreateReplicationGroupInput) SetSnapshotArns(v []*string) *CreateReplicationGroupInput {
s.SnapshotArns = v
return s
}
// SetSnapshotName sets the SnapshotName field's value.
func (s *CreateReplicationGroupInput) SetSnapshotName(v string) *CreateReplicationGroupInput {
s.SnapshotName = &v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *CreateReplicationGroupInput) SetSnapshotRetentionLimit(v int64) *CreateReplicationGroupInput {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *CreateReplicationGroupInput) SetSnapshotWindow(v string) *CreateReplicationGroupInput {
s.SnapshotWindow = &v
return s
}
// SetTags sets the Tags field's value.
func (s *CreateReplicationGroupInput) SetTags(v []*Tag) *CreateReplicationGroupInput {
s.Tags = v
return s
}
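// The fluent setters above each return the receiver, so a request can be
// assembled as one chain. A minimal, hypothetical sketch of a cluster-mode
// request (all identifiers and sizing values below are placeholders, not
// recommendations):
//
//	input := (&CreateReplicationGroupInput{}).
//		SetReplicationGroupId("example-group").
//		SetReplicationGroupDescription("example replication group").
//		SetEngine("redis").
//		SetNumNodeGroups(2).       // two shards
//		SetReplicasPerNodeGroup(1) // one read replica per shard
//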
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateReplicationGroupResult
type CreateReplicationGroupOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific Redis replication group.
ReplicationGroup *ReplicationGroup `type:"structure"`
}
// String returns the string representation
func (s CreateReplicationGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateReplicationGroupOutput) GoString() string {
return s.String()
}
// SetReplicationGroup sets the ReplicationGroup field's value.
func (s *CreateReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *CreateReplicationGroupOutput {
s.ReplicationGroup = v
return s
}
// Represents the input of a CreateSnapshot operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateSnapshotMessage
type CreateSnapshotInput struct {
_ struct{} `type:"structure"`
// The identifier of an existing cache cluster. The snapshot is created from
// this cache cluster.
CacheClusterId *string `type:"string"`
// The identifier of an existing replication group. The snapshot is created
// from this replication group.
ReplicationGroupId *string `type:"string"`
// A name for the snapshot being created.
//
// SnapshotName is a required field
SnapshotName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s CreateSnapshotInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateSnapshotInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *CreateSnapshotInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "CreateSnapshotInput"}
if s.SnapshotName == nil {
invalidParams.Add(request.NewErrParamRequired("SnapshotName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *CreateSnapshotInput) SetCacheClusterId(v string) *CreateSnapshotInput {
s.CacheClusterId = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *CreateSnapshotInput) SetReplicationGroupId(v string) *CreateSnapshotInput {
s.ReplicationGroupId = &v
return s
}
// SetSnapshotName sets the SnapshotName field's value.
func (s *CreateSnapshotInput) SetSnapshotName(v string) *CreateSnapshotInput {
s.SnapshotName = &v
return s
}
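// Validate lets a caller catch a missing required field before any request is
// sent; SnapshotName is the only required field on this type. A hypothetical
// sketch (names are placeholders):
//
//	input := (&CreateSnapshotInput{}).
//		SetCacheClusterId("example-cluster").
//		SetSnapshotName("example-snapshot")
//	if err := input.Validate(); err != nil {
//		// err enumerates any required parameters that were left unset.
//	}
//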
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CreateSnapshotResult
type CreateSnapshotOutput struct {
_ struct{} `type:"structure"`
// Represents a copy of an entire Redis cache cluster as of the time when the
// snapshot was taken.
Snapshot *Snapshot `type:"structure"`
}
// String returns the string representation
func (s CreateSnapshotOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s CreateSnapshotOutput) GoString() string {
return s.String()
}
// SetSnapshot sets the Snapshot field's value.
func (s *CreateSnapshotOutput) SetSnapshot(v *Snapshot) *CreateSnapshotOutput {
s.Snapshot = v
return s
}
// Represents the input of a DeleteCacheCluster operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheClusterMessage
type DeleteCacheClusterInput struct {
_ struct{} `type:"structure"`
// The cache cluster identifier for the cluster to be deleted. This parameter
// is not case sensitive.
//
// CacheClusterId is a required field
CacheClusterId *string `type:"string" required:"true"`
// The user-supplied name of a final cache cluster snapshot. This is the unique
// name that identifies the snapshot. ElastiCache creates the snapshot, and
// then deletes the cache cluster immediately afterward.
FinalSnapshotIdentifier *string `type:"string"`
}
// String returns the string representation
func (s DeleteCacheClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteCacheClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteCacheClusterInput"}
if s.CacheClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *DeleteCacheClusterInput) SetCacheClusterId(v string) *DeleteCacheClusterInput {
s.CacheClusterId = &v
return s
}
// SetFinalSnapshotIdentifier sets the FinalSnapshotIdentifier field's value.
func (s *DeleteCacheClusterInput) SetFinalSnapshotIdentifier(v string) *DeleteCacheClusterInput {
s.FinalSnapshotIdentifier = &v
return s
}
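// Setting FinalSnapshotIdentifier asks ElastiCache to take one last snapshot
// before the cluster is deleted. A hypothetical sketch (identifiers are
// placeholders):
//
//	input := (&DeleteCacheClusterInput{}).
//		SetCacheClusterId("example-cluster").
//		SetFinalSnapshotIdentifier("example-final-snapshot")
//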
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheClusterResult
type DeleteCacheClusterOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific cache cluster.
CacheCluster *CacheCluster `type:"structure"`
}
// String returns the string representation
func (s DeleteCacheClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheClusterOutput) GoString() string {
return s.String()
}
// SetCacheCluster sets the CacheCluster field's value.
func (s *DeleteCacheClusterOutput) SetCacheCluster(v *CacheCluster) *DeleteCacheClusterOutput {
s.CacheCluster = v
return s
}
// Represents the input of a DeleteCacheParameterGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroupMessage
type DeleteCacheParameterGroupInput struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group to delete.
//
// The specified cache parameter group must not be associated with any cache
// clusters.
//
// CacheParameterGroupName is a required field
CacheParameterGroupName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteCacheParameterGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheParameterGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteCacheParameterGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteCacheParameterGroupInput"}
if s.CacheParameterGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *DeleteCacheParameterGroupInput) SetCacheParameterGroupName(v string) *DeleteCacheParameterGroupInput {
s.CacheParameterGroupName = &v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheParameterGroupOutput
type DeleteCacheParameterGroupOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteCacheParameterGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheParameterGroupOutput) GoString() string {
return s.String()
}
// Represents the input of a DeleteCacheSecurityGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroupMessage
type DeleteCacheSecurityGroupInput struct {
_ struct{} `type:"structure"`
// The name of the cache security group to delete.
//
// You cannot delete the default security group.
//
// CacheSecurityGroupName is a required field
CacheSecurityGroupName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteCacheSecurityGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheSecurityGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteCacheSecurityGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSecurityGroupInput"}
if s.CacheSecurityGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *DeleteCacheSecurityGroupInput) SetCacheSecurityGroupName(v string) *DeleteCacheSecurityGroupInput {
s.CacheSecurityGroupName = &v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSecurityGroupOutput
type DeleteCacheSecurityGroupOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteCacheSecurityGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheSecurityGroupOutput) GoString() string {
return s.String()
}
// Represents the input of a DeleteCacheSubnetGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroupMessage
type DeleteCacheSubnetGroupInput struct {
_ struct{} `type:"structure"`
// The name of the cache subnet group to delete.
//
// Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
//
// CacheSubnetGroupName is a required field
CacheSubnetGroupName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteCacheSubnetGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheSubnetGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteCacheSubnetGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteCacheSubnetGroupInput"}
if s.CacheSubnetGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *DeleteCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *DeleteCacheSubnetGroupInput {
s.CacheSubnetGroupName = &v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteCacheSubnetGroupOutput
type DeleteCacheSubnetGroupOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
func (s DeleteCacheSubnetGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteCacheSubnetGroupOutput) GoString() string {
return s.String()
}
// Represents the input of a DeleteReplicationGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroupMessage
type DeleteReplicationGroupInput struct {
_ struct{} `type:"structure"`
// The name of a final node group (shard) snapshot. ElastiCache creates the
// snapshot from the primary node in the cluster, rather than one of the replicas;
// this is to ensure that it captures the freshest data. After the final snapshot
// is taken, the replication group is immediately deleted.
FinalSnapshotIdentifier *string `type:"string"`
// The identifier for the cluster to be deleted. This parameter is not case
// sensitive.
//
// ReplicationGroupId is a required field
ReplicationGroupId *string `type:"string" required:"true"`
// If set to true, all of the read replicas are deleted, but the primary node
// is retained.
RetainPrimaryCluster *bool `type:"boolean"`
}
// String returns the string representation
func (s DeleteReplicationGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteReplicationGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteReplicationGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupInput"}
if s.ReplicationGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetFinalSnapshotIdentifier sets the FinalSnapshotIdentifier field's value.
func (s *DeleteReplicationGroupInput) SetFinalSnapshotIdentifier(v string) *DeleteReplicationGroupInput {
s.FinalSnapshotIdentifier = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *DeleteReplicationGroupInput) SetReplicationGroupId(v string) *DeleteReplicationGroupInput {
s.ReplicationGroupId = &v
return s
}
// SetRetainPrimaryCluster sets the RetainPrimaryCluster field's value.
func (s *DeleteReplicationGroupInput) SetRetainPrimaryCluster(v bool) *DeleteReplicationGroupInput {
s.RetainPrimaryCluster = &v
return s
}
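// RetainPrimaryCluster turns a full replication-group delete into a
// replica-only teardown. A hypothetical sketch (the identifier is a
// placeholder):
//
//	input := (&DeleteReplicationGroupInput{}).
//		SetReplicationGroupId("example-group").
//		SetRetainPrimaryCluster(true) // delete read replicas, keep the primary
//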
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteReplicationGroupResult
type DeleteReplicationGroupOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific Redis replication group.
ReplicationGroup *ReplicationGroup `type:"structure"`
}
// String returns the string representation
func (s DeleteReplicationGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteReplicationGroupOutput) GoString() string {
return s.String()
}
// SetReplicationGroup sets the ReplicationGroup field's value.
func (s *DeleteReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *DeleteReplicationGroupOutput {
s.ReplicationGroup = v
return s
}
// Represents the input of a DeleteSnapshot operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshotMessage
type DeleteSnapshotInput struct {
_ struct{} `type:"structure"`
// The name of the snapshot to be deleted.
//
// SnapshotName is a required field
SnapshotName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s DeleteSnapshotInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteSnapshotInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DeleteSnapshotInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DeleteSnapshotInput"}
if s.SnapshotName == nil {
invalidParams.Add(request.NewErrParamRequired("SnapshotName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetSnapshotName sets the SnapshotName field's value.
func (s *DeleteSnapshotInput) SetSnapshotName(v string) *DeleteSnapshotInput {
s.SnapshotName = &v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DeleteSnapshotResult
type DeleteSnapshotOutput struct {
_ struct{} `type:"structure"`
// Represents a copy of an entire Redis cache cluster as of the time when the
// snapshot was taken.
Snapshot *Snapshot `type:"structure"`
}
// String returns the string representation
func (s DeleteSnapshotOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DeleteSnapshotOutput) GoString() string {
return s.String()
}
// SetSnapshot sets the Snapshot field's value.
func (s *DeleteSnapshotOutput) SetSnapshot(v *Snapshot) *DeleteSnapshotOutput {
s.Snapshot = v
return s
}
// Represents the input of a DescribeCacheClusters operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheClustersMessage
type DescribeCacheClustersInput struct {
_ struct{} `type:"structure"`
// The user-supplied cluster identifier. If this parameter is specified, only
// information about that specific cache cluster is returned. This parameter
// isn't case sensitive.
CacheClusterId *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// An optional flag that can be included in the DescribeCacheClusters request
// to show only nodes (API/CLI: clusters) that are not members of a replication
// group. In practice, this means Memcached and single-node Redis clusters.
ShowCacheClustersNotInReplicationGroups *bool `type:"boolean"`
// An optional flag that can be included in the DescribeCacheClusters request
// to retrieve information about the individual cache nodes.
ShowCacheNodeInfo *bool `type:"boolean"`
}
// String returns the string representation
func (s DescribeCacheClustersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheClustersInput) GoString() string {
return s.String()
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *DescribeCacheClustersInput) SetCacheClusterId(v string) *DescribeCacheClustersInput {
s.CacheClusterId = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheClustersInput) SetMarker(v string) *DescribeCacheClustersInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeCacheClustersInput) SetMaxRecords(v int64) *DescribeCacheClustersInput {
s.MaxRecords = &v
return s
}
// SetShowCacheClustersNotInReplicationGroups sets the ShowCacheClustersNotInReplicationGroups field's value.
func (s *DescribeCacheClustersInput) SetShowCacheClustersNotInReplicationGroups(v bool) *DescribeCacheClustersInput {
s.ShowCacheClustersNotInReplicationGroups = &v
return s
}
// SetShowCacheNodeInfo sets the ShowCacheNodeInfo field's value.
func (s *DescribeCacheClustersInput) SetShowCacheNodeInfo(v bool) *DescribeCacheClustersInput {
s.ShowCacheNodeInfo = &v
return s
}
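// The two Show* flags widen or narrow the scope of the response rather than
// filter by identifier. A hypothetical sketch requesting per-node detail for
// standalone (non-replication-group) clusters only:
//
//	input := (&DescribeCacheClustersInput{}).
//		SetShowCacheClustersNotInReplicationGroups(true).
//		SetShowCacheNodeInfo(true)
//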
// Represents the output of a DescribeCacheClusters operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheClusterMessage
type DescribeCacheClustersOutput struct {
_ struct{} `type:"structure"`
// A list of cache clusters. Each item in the list contains detailed information
// about one cache cluster.
CacheClusters []*CacheCluster `locationNameList:"CacheCluster" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
}
// String returns the string representation
func (s DescribeCacheClustersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheClustersOutput) GoString() string {
return s.String()
}
// SetCacheClusters sets the CacheClusters field's value.
func (s *DescribeCacheClustersOutput) SetCacheClusters(v []*CacheCluster) *DescribeCacheClustersOutput {
s.CacheClusters = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheClustersOutput) SetMarker(v string) *DescribeCacheClustersOutput {
s.Marker = &v
return s
}
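// Describe results are paginated: when more records exist than MaxRecords, the
// output carries a Marker to feed back into the next request. A hypothetical
// sketch, assuming svc is an ElastiCache client constructed elsewhere in the
// usual SDK way:
//
//	input := (&DescribeCacheClustersInput{}).SetMaxRecords(20)
//	for {
//		out, err := svc.DescribeCacheClusters(input)
//		if err != nil {
//			break // real code would surface the error
//		}
//		// consume out.CacheClusters here
//		if out.Marker == nil {
//			break // no more pages
//		}
//		input.SetMarker(*out.Marker)
//	}
//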
// Represents the input of a DescribeCacheEngineVersions operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheEngineVersionsMessage
type DescribeCacheEngineVersionsInput struct {
_ struct{} `type:"structure"`
// The name of a specific cache parameter group family to return details for.
//
// Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2
//
// Constraints:
//
// * Must be 1 to 255 alphanumeric characters
//
// * First character must be a letter
//
// * Cannot end with a hyphen or contain two consecutive hyphens
CacheParameterGroupFamily *string `type:"string"`
// If true, specifies that only the default version of the specified engine
// or engine and major version combination is to be returned.
DefaultOnly *bool `type:"boolean"`
// The cache engine to return. Valid values: memcached | redis
Engine *string `type:"string"`
// The cache engine version to return.
//
// Example: 1.4.14
EngineVersion *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
}
// String returns the string representation
func (s DescribeCacheEngineVersionsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheEngineVersionsInput) GoString() string {
return s.String()
}
// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value.
func (s *DescribeCacheEngineVersionsInput) SetCacheParameterGroupFamily(v string) *DescribeCacheEngineVersionsInput {
s.CacheParameterGroupFamily = &v
return s
}
// SetDefaultOnly sets the DefaultOnly field's value.
func (s *DescribeCacheEngineVersionsInput) SetDefaultOnly(v bool) *DescribeCacheEngineVersionsInput {
s.DefaultOnly = &v
return s
}
// SetEngine sets the Engine field's value.
func (s *DescribeCacheEngineVersionsInput) SetEngine(v string) *DescribeCacheEngineVersionsInput {
s.Engine = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *DescribeCacheEngineVersionsInput) SetEngineVersion(v string) *DescribeCacheEngineVersionsInput {
s.EngineVersion = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheEngineVersionsInput) SetMarker(v string) *DescribeCacheEngineVersionsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeCacheEngineVersionsInput) SetMaxRecords(v int64) *DescribeCacheEngineVersionsInput {
s.MaxRecords = &v
return s
}
// Represents the output of a DescribeCacheEngineVersions operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheEngineVersionMessage
type DescribeCacheEngineVersionsOutput struct {
_ struct{} `type:"structure"`
// A list of cache engine version details. Each element in the list contains
// detailed information about one cache engine version.
CacheEngineVersions []*CacheEngineVersion `locationNameList:"CacheEngineVersion" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
}
// String returns the string representation
func (s DescribeCacheEngineVersionsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheEngineVersionsOutput) GoString() string {
return s.String()
}
// SetCacheEngineVersions sets the CacheEngineVersions field's value.
func (s *DescribeCacheEngineVersionsOutput) SetCacheEngineVersions(v []*CacheEngineVersion) *DescribeCacheEngineVersionsOutput {
s.CacheEngineVersions = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheEngineVersionsOutput) SetMarker(v string) *DescribeCacheEngineVersionsOutput {
s.Marker = &v
return s
}
// Represents the input of a DescribeCacheParameterGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParameterGroupsMessage
type DescribeCacheParameterGroupsInput struct {
_ struct{} `type:"structure"`
// The name of a specific cache parameter group to return details for.
CacheParameterGroupName *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
}
// String returns the string representation
func (s DescribeCacheParameterGroupsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheParameterGroupsInput) GoString() string {
return s.String()
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *DescribeCacheParameterGroupsInput) SetCacheParameterGroupName(v string) *DescribeCacheParameterGroupsInput {
s.CacheParameterGroupName = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheParameterGroupsInput) SetMarker(v string) *DescribeCacheParameterGroupsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeCacheParameterGroupsInput) SetMaxRecords(v int64) *DescribeCacheParameterGroupsInput {
s.MaxRecords = &v
return s
}
// Represents the output of a DescribeCacheParameterGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheParameterGroupsMessage
type DescribeCacheParameterGroupsOutput struct {
_ struct{} `type:"structure"`
// A list of cache parameter groups. Each element in the list contains detailed
// information about one cache parameter group.
CacheParameterGroups []*CacheParameterGroup `locationNameList:"CacheParameterGroup" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
}
// String returns the string representation
func (s DescribeCacheParameterGroupsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheParameterGroupsOutput) GoString() string {
return s.String()
}
// SetCacheParameterGroups sets the CacheParameterGroups field's value.
func (s *DescribeCacheParameterGroupsOutput) SetCacheParameterGroups(v []*CacheParameterGroup) *DescribeCacheParameterGroupsOutput {
s.CacheParameterGroups = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheParameterGroupsOutput) SetMarker(v string) *DescribeCacheParameterGroupsOutput {
s.Marker = &v
return s
}
// Represents the input of a DescribeCacheParameters operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheParametersMessage
type DescribeCacheParametersInput struct {
_ struct{} `type:"structure"`
// The name of a specific cache parameter group to return details for.
//
// CacheParameterGroupName is a required field
CacheParameterGroupName *string `type:"string" required:"true"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// The parameter types to return.
//
// Valid values: user | system | engine-default
Source *string `type:"string"`
}
// String returns the string representation
func (s DescribeCacheParametersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheParametersInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeCacheParametersInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeCacheParametersInput"}
if s.CacheParameterGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *DescribeCacheParametersInput) SetCacheParameterGroupName(v string) *DescribeCacheParametersInput {
s.CacheParameterGroupName = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheParametersInput) SetMarker(v string) *DescribeCacheParametersInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeCacheParametersInput) SetMaxRecords(v int64) *DescribeCacheParametersInput {
s.MaxRecords = &v
return s
}
// SetSource sets the Source field's value.
func (s *DescribeCacheParametersInput) SetSource(v string) *DescribeCacheParametersInput {
s.Source = &v
return s
}
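// Source narrows the returned parameters to a single origin (user, system, or
// engine-default). A hypothetical sketch requesting only user-modified
// parameters for a named group (the group name is a placeholder):
//
//	input := (&DescribeCacheParametersInput{}).
//		SetCacheParameterGroupName("example-params").
//		SetSource("user")
//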
// Represents the output of a DescribeCacheParameters operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheParameterGroupDetails
type DescribeCacheParametersOutput struct {
_ struct{} `type:"structure"`
// A list of parameters specific to a particular cache node type. Each element
// in the list contains detailed information about one parameter.
CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
// A list of Parameter instances.
Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
}
// String returns the string representation
func (s DescribeCacheParametersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheParametersOutput) GoString() string {
return s.String()
}
// SetCacheNodeTypeSpecificParameters sets the CacheNodeTypeSpecificParameters field's value.
func (s *DescribeCacheParametersOutput) SetCacheNodeTypeSpecificParameters(v []*CacheNodeTypeSpecificParameter) *DescribeCacheParametersOutput {
s.CacheNodeTypeSpecificParameters = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheParametersOutput) SetMarker(v string) *DescribeCacheParametersOutput {
s.Marker = &v
return s
}
// SetParameters sets the Parameters field's value.
func (s *DescribeCacheParametersOutput) SetParameters(v []*Parameter) *DescribeCacheParametersOutput {
s.Parameters = v
return s
}
// Represents the input of a DescribeCacheSecurityGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSecurityGroupsMessage
type DescribeCacheSecurityGroupsInput struct {
_ struct{} `type:"structure"`
// The name of the cache security group to return details for.
CacheSecurityGroupName *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
}
// String returns the string representation
func (s DescribeCacheSecurityGroupsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheSecurityGroupsInput) GoString() string {
return s.String()
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *DescribeCacheSecurityGroupsInput) SetCacheSecurityGroupName(v string) *DescribeCacheSecurityGroupsInput {
s.CacheSecurityGroupName = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheSecurityGroupsInput) SetMarker(v string) *DescribeCacheSecurityGroupsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeCacheSecurityGroupsInput) SetMaxRecords(v int64) *DescribeCacheSecurityGroupsInput {
s.MaxRecords = &v
return s
}
// Represents the output of a DescribeCacheSecurityGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheSecurityGroupMessage
type DescribeCacheSecurityGroupsOutput struct {
_ struct{} `type:"structure"`
// A list of cache security groups. Each element in the list contains detailed
// information about one group.
CacheSecurityGroups []*CacheSecurityGroup `locationNameList:"CacheSecurityGroup" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
}
// String returns the string representation
func (s DescribeCacheSecurityGroupsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheSecurityGroupsOutput) GoString() string {
return s.String()
}
// SetCacheSecurityGroups sets the CacheSecurityGroups field's value.
func (s *DescribeCacheSecurityGroupsOutput) SetCacheSecurityGroups(v []*CacheSecurityGroup) *DescribeCacheSecurityGroupsOutput {
s.CacheSecurityGroups = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheSecurityGroupsOutput) SetMarker(v string) *DescribeCacheSecurityGroupsOutput {
s.Marker = &v
return s
}
// Represents the input of a DescribeCacheSubnetGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeCacheSubnetGroupsMessage
type DescribeCacheSubnetGroupsInput struct {
_ struct{} `type:"structure"`
// The name of the cache subnet group to return details for.
CacheSubnetGroupName *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
}
// String returns the string representation
func (s DescribeCacheSubnetGroupsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheSubnetGroupsInput) GoString() string {
return s.String()
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *DescribeCacheSubnetGroupsInput) SetCacheSubnetGroupName(v string) *DescribeCacheSubnetGroupsInput {
s.CacheSubnetGroupName = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheSubnetGroupsInput) SetMarker(v string) *DescribeCacheSubnetGroupsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeCacheSubnetGroupsInput) SetMaxRecords(v int64) *DescribeCacheSubnetGroupsInput {
s.MaxRecords = &v
return s
}
// Represents the output of a DescribeCacheSubnetGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/CacheSubnetGroupMessage
type DescribeCacheSubnetGroupsOutput struct {
_ struct{} `type:"structure"`
// A list of cache subnet groups. Each element in the list contains detailed
// information about one group.
CacheSubnetGroups []*CacheSubnetGroup `locationNameList:"CacheSubnetGroup" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
}
// String returns the string representation
func (s DescribeCacheSubnetGroupsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeCacheSubnetGroupsOutput) GoString() string {
return s.String()
}
// SetCacheSubnetGroups sets the CacheSubnetGroups field's value.
func (s *DescribeCacheSubnetGroupsOutput) SetCacheSubnetGroups(v []*CacheSubnetGroup) *DescribeCacheSubnetGroupsOutput {
s.CacheSubnetGroups = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeCacheSubnetGroupsOutput) SetMarker(v string) *DescribeCacheSubnetGroupsOutput {
s.Marker = &v
return s
}
// Represents the input of a DescribeEngineDefaultParameters operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParametersMessage
type DescribeEngineDefaultParametersInput struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group family.
//
// Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2
//
// CacheParameterGroupFamily is a required field
CacheParameterGroupFamily *string `type:"string" required:"true"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
}
// String returns the string representation
func (s DescribeEngineDefaultParametersInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeEngineDefaultParametersInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeEngineDefaultParametersInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeEngineDefaultParametersInput"}
if s.CacheParameterGroupFamily == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupFamily"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value.
func (s *DescribeEngineDefaultParametersInput) SetCacheParameterGroupFamily(v string) *DescribeEngineDefaultParametersInput {
s.CacheParameterGroupFamily = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeEngineDefaultParametersInput) SetMarker(v string) *DescribeEngineDefaultParametersInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeEngineDefaultParametersInput) SetMaxRecords(v int64) *DescribeEngineDefaultParametersInput {
s.MaxRecords = &v
return s
}
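// CacheParameterGroupFamily is the only required field on this type, and
// Validate catches its absence client-side. A hypothetical sketch:
//
//	input := (&DescribeEngineDefaultParametersInput{}).
//		SetCacheParameterGroupFamily("redis3.2")
//	if err := input.Validate(); err != nil {
//		// not reached here, since the required field is set
//	}
//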
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEngineDefaultParametersResult
type DescribeEngineDefaultParametersOutput struct {
_ struct{} `type:"structure"`
// Represents the output of a DescribeEngineDefaultParameters operation.
EngineDefaults *EngineDefaults `type:"structure"`
}
// String returns the string representation
func (s DescribeEngineDefaultParametersOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeEngineDefaultParametersOutput) GoString() string {
return s.String()
}
// SetEngineDefaults sets the EngineDefaults field's value.
func (s *DescribeEngineDefaultParametersOutput) SetEngineDefaults(v *EngineDefaults) *DescribeEngineDefaultParametersOutput {
s.EngineDefaults = v
return s
}
// Represents the input of a DescribeEvents operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeEventsMessage
type DescribeEventsInput struct {
_ struct{} `type:"structure"`
// The number of minutes worth of events to retrieve.
Duration *int64 `type:"integer"`
// The end of the time interval for which to retrieve events, specified in ISO
// 8601 format.
//
// Example: 2017-03-30T07:03:49.555Z
EndTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// The identifier of the event source for which events are returned. If not
// specified, all sources are included in the response.
SourceIdentifier *string `type:"string"`
// The event source to retrieve events for. If no value is specified, all events
// are returned.
SourceType *string `type:"string" enum:"SourceType"`
// The beginning of the time interval to retrieve events for, specified in ISO
// 8601 format.
//
// Example: 2017-03-30T07:03:49.555Z
StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
}
// String returns the string representation
func (s DescribeEventsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeEventsInput) GoString() string {
return s.String()
}
// SetDuration sets the Duration field's value.
func (s *DescribeEventsInput) SetDuration(v int64) *DescribeEventsInput {
s.Duration = &v
return s
}
// SetEndTime sets the EndTime field's value.
func (s *DescribeEventsInput) SetEndTime(v time.Time) *DescribeEventsInput {
s.EndTime = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeEventsInput) SetMarker(v string) *DescribeEventsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeEventsInput) SetMaxRecords(v int64) *DescribeEventsInput {
s.MaxRecords = &v
return s
}
// SetSourceIdentifier sets the SourceIdentifier field's value.
func (s *DescribeEventsInput) SetSourceIdentifier(v string) *DescribeEventsInput {
s.SourceIdentifier = &v
return s
}
// SetSourceType sets the SourceType field's value.
func (s *DescribeEventsInput) SetSourceType(v string) *DescribeEventsInput {
s.SourceType = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *DescribeEventsInput) SetStartTime(v time.Time) *DescribeEventsInput {
s.StartTime = &v
return s
}
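// StartTime and EndTime take time.Time values that the SDK serializes to the
// ISO 8601 form shown in the field docs. A hypothetical sketch of a one-hour
// window of cache-cluster events (the window is illustrative):
//
//	end := time.Now()
//	input := (&DescribeEventsInput{}).
//		SetSourceType("cache-cluster").
//		SetStartTime(end.Add(-time.Hour)).
//		SetEndTime(end)
//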
// Represents the output of a DescribeEvents operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/EventsMessage
type DescribeEventsOutput struct {
_ struct{} `type:"structure"`
// A list of events. Each element in the list contains detailed information
// about one event.
Events []*Event `locationNameList:"Event" type:"list"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
}
// String returns the string representation
func (s DescribeEventsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeEventsOutput) GoString() string {
return s.String()
}
// SetEvents sets the Events field's value.
func (s *DescribeEventsOutput) SetEvents(v []*Event) *DescribeEventsOutput {
s.Events = v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeEventsOutput) SetMarker(v string) *DescribeEventsOutput {
s.Marker = &v
return s
}
// Represents the input of a DescribeReplicationGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReplicationGroupsMessage
type DescribeReplicationGroupsInput struct {
_ struct{} `type:"structure"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// The identifier for the replication group to be described. This parameter
// is not case sensitive.
//
// If you do not specify this parameter, information about all replication groups
// is returned.
ReplicationGroupId *string `type:"string"`
}
// String returns the string representation
func (s DescribeReplicationGroupsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeReplicationGroupsInput) GoString() string {
return s.String()
}
// SetMarker sets the Marker field's value.
func (s *DescribeReplicationGroupsInput) SetMarker(v string) *DescribeReplicationGroupsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeReplicationGroupsInput) SetMaxRecords(v int64) *DescribeReplicationGroupsInput {
s.MaxRecords = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *DescribeReplicationGroupsInput) SetReplicationGroupId(v string) *DescribeReplicationGroupsInput {
s.ReplicationGroupId = &v
return s
}
// Represents the output of a DescribeReplicationGroups operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReplicationGroupMessage
type DescribeReplicationGroupsOutput struct {
_ struct{} `type:"structure"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
// A list of replication groups. Each item in the list contains detailed information
// about one replication group.
ReplicationGroups []*ReplicationGroup `locationNameList:"ReplicationGroup" type:"list"`
}
// String returns the string representation
func (s DescribeReplicationGroupsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeReplicationGroupsOutput) GoString() string {
return s.String()
}
// SetMarker sets the Marker field's value.
func (s *DescribeReplicationGroupsOutput) SetMarker(v string) *DescribeReplicationGroupsOutput {
s.Marker = &v
return s
}
// SetReplicationGroups sets the ReplicationGroups field's value.
func (s *DescribeReplicationGroupsOutput) SetReplicationGroups(v []*ReplicationGroup) *DescribeReplicationGroupsOutput {
s.ReplicationGroups = v
return s
}
// Represents the input of a DescribeReservedCacheNodes operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesMessage
type DescribeReservedCacheNodesInput struct {
_ struct{} `type:"structure"`
// The cache node type filter value. Use this parameter to show only those reservations
// matching the specified cache node type.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// The duration filter value, specified in years or seconds. Use this parameter
// to show only reservations for this duration.
//
// Valid Values: 1 | 3 | 31536000 | 94608000
Duration *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// The offering type filter value. Use this parameter to show only the available
// offerings matching the specified offering type.
//
// Valid values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization"
OfferingType *string `type:"string"`
// The product description filter value. Use this parameter to show only those
// reservations matching the specified product description.
ProductDescription *string `type:"string"`
// The reserved cache node identifier filter value. Use this parameter to show
// only the reservation that matches the specified reservation ID.
ReservedCacheNodeId *string `type:"string"`
// The offering identifier filter value. Use this parameter to show only purchased
// reservations matching the specified offering identifier.
ReservedCacheNodesOfferingId *string `type:"string"`
}
// String returns the string representation
func (s DescribeReservedCacheNodesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeReservedCacheNodesInput) GoString() string {
return s.String()
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *DescribeReservedCacheNodesInput) SetCacheNodeType(v string) *DescribeReservedCacheNodesInput {
s.CacheNodeType = &v
return s
}
// SetDuration sets the Duration field's value.
func (s *DescribeReservedCacheNodesInput) SetDuration(v string) *DescribeReservedCacheNodesInput {
s.Duration = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeReservedCacheNodesInput) SetMarker(v string) *DescribeReservedCacheNodesInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeReservedCacheNodesInput) SetMaxRecords(v int64) *DescribeReservedCacheNodesInput {
s.MaxRecords = &v
return s
}
// SetOfferingType sets the OfferingType field's value.
func (s *DescribeReservedCacheNodesInput) SetOfferingType(v string) *DescribeReservedCacheNodesInput {
s.OfferingType = &v
return s
}
// SetProductDescription sets the ProductDescription field's value.
func (s *DescribeReservedCacheNodesInput) SetProductDescription(v string) *DescribeReservedCacheNodesInput {
s.ProductDescription = &v
return s
}
// SetReservedCacheNodeId sets the ReservedCacheNodeId field's value.
func (s *DescribeReservedCacheNodesInput) SetReservedCacheNodeId(v string) *DescribeReservedCacheNodesInput {
s.ReservedCacheNodeId = &v
return s
}
// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value.
func (s *DescribeReservedCacheNodesInput) SetReservedCacheNodesOfferingId(v string) *DescribeReservedCacheNodesInput {
s.ReservedCacheNodesOfferingId = &v
return s
}
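// Duration is a string even though it carries numeric values, because the API
// accepts either years or seconds. A hypothetical sketch (the node type is a
// placeholder):
//
//	input := (&DescribeReservedCacheNodesInput{}).
//		SetCacheNodeType("cache.m4.large").
//		SetDuration("31536000") // one year in seconds; "1" (years) is equivalent
//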
// Represents the input of a DescribeReservedCacheNodesOfferings operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeReservedCacheNodesOfferingsMessage
type DescribeReservedCacheNodesOfferingsInput struct {
_ struct{} `type:"structure"`
// The cache node type filter value. Use this parameter to show only the available
// offerings matching the specified cache node type.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// Duration filter value, specified in years or seconds. Use this parameter
// to show only reservations for a given duration.
//
// Valid Values: 1 | 3 | 31536000 | 94608000
Duration *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 100
//
// Constraints: minimum 20; maximum 100.
MaxRecords *int64 `type:"integer"`
// The offering type filter value. Use this parameter to show only the available
// offerings matching the specified offering type.
//
// Valid Values: "Light Utilization"|"Medium Utilization"|"Heavy Utilization"
OfferingType *string `type:"string"`
// The product description filter value. Use this parameter to show only the
// available offerings matching the specified product description.
ProductDescription *string `type:"string"`
// The offering identifier filter value. Use this parameter to show only the
// available offering that matches the specified reservation identifier.
//
// Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
ReservedCacheNodesOfferingId *string `type:"string"`
}
// String returns the string representation
func (s DescribeReservedCacheNodesOfferingsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeReservedCacheNodesOfferingsInput) GoString() string {
return s.String()
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetCacheNodeType(v string) *DescribeReservedCacheNodesOfferingsInput {
s.CacheNodeType = &v
return s
}
// SetDuration sets the Duration field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetDuration(v string) *DescribeReservedCacheNodesOfferingsInput {
s.Duration = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetMarker(v string) *DescribeReservedCacheNodesOfferingsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetMaxRecords(v int64) *DescribeReservedCacheNodesOfferingsInput {
s.MaxRecords = &v
return s
}
// SetOfferingType sets the OfferingType field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetOfferingType(v string) *DescribeReservedCacheNodesOfferingsInput {
s.OfferingType = &v
return s
}
// SetProductDescription sets the ProductDescription field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetProductDescription(v string) *DescribeReservedCacheNodesOfferingsInput {
s.ProductDescription = &v
return s
}
// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value.
func (s *DescribeReservedCacheNodesOfferingsInput) SetReservedCacheNodesOfferingId(v string) *DescribeReservedCacheNodesOfferingsInput {
s.ReservedCacheNodesOfferingId = &v
return s
}
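// buildOfferingsPage is an illustrative sketch, not part of the generated
// API: it shows how the chainable setters above assemble a paginated
// DescribeReservedCacheNodesOfferings request. The node type and offering
// type values are assumptions picked from the documented valid values.
func buildOfferingsPage(marker string) *DescribeReservedCacheNodesOfferingsInput {
	in := &DescribeReservedCacheNodesOfferingsInput{}
	in.SetCacheNodeType("cache.m4.large"). // filter to one node type
		SetOfferingType("Heavy Utilization").
		SetMaxRecords(100) // constraints: minimum 20, maximum 100
	if marker != "" {
		in.SetMarker(marker) // resume where a prior response's marker left off
	}
	return in
}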
// Represents the output of a DescribeReservedCacheNodesOfferings operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReservedCacheNodesOfferingMessage
type DescribeReservedCacheNodesOfferingsOutput struct {
_ struct{} `type:"structure"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
// A list of reserved cache node offerings. Each element in the list contains
// detailed information about one offering.
ReservedCacheNodesOfferings []*ReservedCacheNodesOffering `locationNameList:"ReservedCacheNodesOffering" type:"list"`
}
// String returns the string representation
func (s DescribeReservedCacheNodesOfferingsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeReservedCacheNodesOfferingsOutput) GoString() string {
return s.String()
}
// SetMarker sets the Marker field's value.
func (s *DescribeReservedCacheNodesOfferingsOutput) SetMarker(v string) *DescribeReservedCacheNodesOfferingsOutput {
s.Marker = &v
return s
}
// SetReservedCacheNodesOfferings sets the ReservedCacheNodesOfferings field's value.
func (s *DescribeReservedCacheNodesOfferingsOutput) SetReservedCacheNodesOfferings(v []*ReservedCacheNodesOffering) *DescribeReservedCacheNodesOfferingsOutput {
s.ReservedCacheNodesOfferings = v
return s
}
// Represents the output of a DescribeReservedCacheNodes operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReservedCacheNodeMessage
type DescribeReservedCacheNodesOutput struct {
_ struct{} `type:"structure"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
// A list of reserved cache nodes. Each element in the list contains detailed
// information about one node.
ReservedCacheNodes []*ReservedCacheNode `locationNameList:"ReservedCacheNode" type:"list"`
}
// String returns the string representation
func (s DescribeReservedCacheNodesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeReservedCacheNodesOutput) GoString() string {
return s.String()
}
// SetMarker sets the Marker field's value.
func (s *DescribeReservedCacheNodesOutput) SetMarker(v string) *DescribeReservedCacheNodesOutput {
s.Marker = &v
return s
}
// SetReservedCacheNodes sets the ReservedCacheNodes field's value.
func (s *DescribeReservedCacheNodesOutput) SetReservedCacheNodes(v []*ReservedCacheNode) *DescribeReservedCacheNodesOutput {
s.ReservedCacheNodes = v
return s
}
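// collectReservedNodes is an illustrative sketch, not part of the generated
// API, of driving Marker-based pagination by hand: each response's Marker
// feeds the next request until no marker comes back. It assumes c is a
// configured ElastiCache client from this package.
func collectReservedNodes(c *ElastiCache, in *DescribeReservedCacheNodesInput) ([]*ReservedCacheNode, error) {
	var nodes []*ReservedCacheNode
	for {
		out, err := c.DescribeReservedCacheNodes(in)
		if err != nil {
			return nil, err
		}
		nodes = append(nodes, out.ReservedCacheNodes...)
		if out.Marker == nil || *out.Marker == "" {
			return nodes, nil // no marker means no further pages
		}
		in.SetMarker(*out.Marker) // request the records beyond this marker
	}
}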
// Represents the input of a DescribeSnapshotsMessage operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshotsMessage
type DescribeSnapshotsInput struct {
_ struct{} `type:"structure"`
// A user-supplied cluster identifier. If this parameter is specified, only
// snapshots associated with that specific cache cluster are described.
CacheClusterId *string `type:"string"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// The maximum number of records to include in the response. If more records
// exist than the specified MaxRecords value, a marker is included in the response
// so that the remaining results can be retrieved.
//
// Default: 50
//
// Constraints: minimum 20; maximum 50.
MaxRecords *int64 `type:"integer"`
// A user-supplied replication group identifier. If this parameter is specified,
// only snapshots associated with that specific replication group are described.
ReplicationGroupId *string `type:"string"`
// A Boolean value. If true, the node group (shard) configuration is included
// in the snapshot description.
ShowNodeGroupConfig *bool `type:"boolean"`
// A user-supplied name of the snapshot. If this parameter is specified, only
// this snapshot is described.
SnapshotName *string `type:"string"`
// If set to system, the output shows snapshots that were automatically created
// by ElastiCache. If set to user, the output shows snapshots that were manually
// created. If omitted, the output shows both automatically and manually created
// snapshots.
SnapshotSource *string `type:"string"`
}
// String returns the string representation
func (s DescribeSnapshotsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeSnapshotsInput) GoString() string {
return s.String()
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *DescribeSnapshotsInput) SetCacheClusterId(v string) *DescribeSnapshotsInput {
s.CacheClusterId = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *DescribeSnapshotsInput) SetMarker(v string) *DescribeSnapshotsInput {
s.Marker = &v
return s
}
// SetMaxRecords sets the MaxRecords field's value.
func (s *DescribeSnapshotsInput) SetMaxRecords(v int64) *DescribeSnapshotsInput {
s.MaxRecords = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *DescribeSnapshotsInput) SetReplicationGroupId(v string) *DescribeSnapshotsInput {
s.ReplicationGroupId = &v
return s
}
// SetShowNodeGroupConfig sets the ShowNodeGroupConfig field's value.
func (s *DescribeSnapshotsInput) SetShowNodeGroupConfig(v bool) *DescribeSnapshotsInput {
s.ShowNodeGroupConfig = &v
return s
}
// SetSnapshotName sets the SnapshotName field's value.
func (s *DescribeSnapshotsInput) SetSnapshotName(v string) *DescribeSnapshotsInput {
s.SnapshotName = &v
return s
}
// SetSnapshotSource sets the SnapshotSource field's value.
func (s *DescribeSnapshotsInput) SetSnapshotSource(v string) *DescribeSnapshotsInput {
s.SnapshotSource = &v
return s
}
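// userSnapshotsInput is an illustrative sketch, not part of the generated
// API: it requests only manually created ("user") snapshots of a single
// replication group, with the node group configuration included in each
// description. The MaxRecords value is an assumption within the documented
// 20-50 range.
func userSnapshotsInput(replicationGroupID string) *DescribeSnapshotsInput {
	in := &DescribeSnapshotsInput{}
	in.SetReplicationGroupId(replicationGroupID).
		SetSnapshotSource("user"). // omit to list system and user snapshots alike
		SetShowNodeGroupConfig(true).
		SetMaxRecords(50)
	return in
}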
// Represents the output of a DescribeSnapshots operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/DescribeSnapshotsListMessage
type DescribeSnapshotsOutput struct {
_ struct{} `type:"structure"`
// An optional marker returned from a prior request. Use this marker for pagination
// of results from this operation. If this parameter is specified, the response
// includes only records beyond the marker, up to the value specified by MaxRecords.
Marker *string `type:"string"`
// A list of snapshots. Each item in the list contains detailed information
// about one snapshot.
Snapshots []*Snapshot `locationNameList:"Snapshot" type:"list"`
}
// String returns the string representation
func (s DescribeSnapshotsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s DescribeSnapshotsOutput) GoString() string {
return s.String()
}
// SetMarker sets the Marker field's value.
func (s *DescribeSnapshotsOutput) SetMarker(v string) *DescribeSnapshotsOutput {
s.Marker = &v
return s
}
// SetSnapshots sets the Snapshots field's value.
func (s *DescribeSnapshotsOutput) SetSnapshots(v []*Snapshot) *DescribeSnapshotsOutput {
s.Snapshots = v
return s
}
// Provides ownership and status information for an Amazon EC2 security group.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/EC2SecurityGroup
type EC2SecurityGroup struct {
_ struct{} `type:"structure"`
// The name of the Amazon EC2 security group.
EC2SecurityGroupName *string `type:"string"`
// The AWS account ID of the Amazon EC2 security group owner.
EC2SecurityGroupOwnerId *string `type:"string"`
// The status of the Amazon EC2 security group.
Status *string `type:"string"`
}
// String returns the string representation
func (s EC2SecurityGroup) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EC2SecurityGroup) GoString() string {
return s.String()
}
// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value.
func (s *EC2SecurityGroup) SetEC2SecurityGroupName(v string) *EC2SecurityGroup {
s.EC2SecurityGroupName = &v
return s
}
// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value.
func (s *EC2SecurityGroup) SetEC2SecurityGroupOwnerId(v string) *EC2SecurityGroup {
s.EC2SecurityGroupOwnerId = &v
return s
}
// SetStatus sets the Status field's value.
func (s *EC2SecurityGroup) SetStatus(v string) *EC2SecurityGroup {
s.Status = &v
return s
}
// Represents the information required for client programs to connect to a cache
// node.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/Endpoint
type Endpoint struct {
_ struct{} `type:"structure"`
// The DNS hostname of the cache node.
Address *string `type:"string"`
// The port number that the cache engine is listening on.
Port *int64 `type:"integer"`
}
// String returns the string representation
func (s Endpoint) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Endpoint) GoString() string {
return s.String()
}
// SetAddress sets the Address field's value.
func (s *Endpoint) SetAddress(v string) *Endpoint {
s.Address = &v
return s
}
// SetPort sets the Port field's value.
func (s *Endpoint) SetPort(v int64) *Endpoint {
s.Port = &v
return s
}
// Represents the output of a DescribeEngineDefaultParameters operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/EngineDefaults
type EngineDefaults struct {
_ struct{} `type:"structure"`
// A list of parameters specific to a particular cache node type. Each element
// in the list contains detailed information about one parameter.
CacheNodeTypeSpecificParameters []*CacheNodeTypeSpecificParameter `locationNameList:"CacheNodeTypeSpecificParameter" type:"list"`
// Specifies the name of the cache parameter group family to which the engine
// default parameters apply.
//
// Valid values are: memcached1.4 | redis2.6 | redis2.8 | redis3.2
CacheParameterGroupFamily *string `type:"string"`
// Provides an identifier to allow retrieval of paginated results.
Marker *string `type:"string"`
// Contains a list of engine default parameters.
Parameters []*Parameter `locationNameList:"Parameter" type:"list"`
}
// String returns the string representation
func (s EngineDefaults) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s EngineDefaults) GoString() string {
return s.String()
}
// SetCacheNodeTypeSpecificParameters sets the CacheNodeTypeSpecificParameters field's value.
func (s *EngineDefaults) SetCacheNodeTypeSpecificParameters(v []*CacheNodeTypeSpecificParameter) *EngineDefaults {
s.CacheNodeTypeSpecificParameters = v
return s
}
// SetCacheParameterGroupFamily sets the CacheParameterGroupFamily field's value.
func (s *EngineDefaults) SetCacheParameterGroupFamily(v string) *EngineDefaults {
s.CacheParameterGroupFamily = &v
return s
}
// SetMarker sets the Marker field's value.
func (s *EngineDefaults) SetMarker(v string) *EngineDefaults {
s.Marker = &v
return s
}
// SetParameters sets the Parameters field's value.
func (s *EngineDefaults) SetParameters(v []*Parameter) *EngineDefaults {
s.Parameters = v
return s
}
// Represents a single occurrence of something interesting within the system.
// Some examples of events are creating a cache cluster, adding or removing
// a cache node, or rebooting a node.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/Event
type Event struct {
_ struct{} `type:"structure"`
// The date and time when the event occurred.
Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The text of the event.
Message *string `type:"string"`
// The identifier for the source of the event. For example, if the event occurred
// at the cache cluster level, the identifier would be the name of the cache
// cluster.
SourceIdentifier *string `type:"string"`
// Specifies the origin of this event - a cache cluster, a parameter group,
// a security group, etc.
SourceType *string `type:"string" enum:"SourceType"`
}
// String returns the string representation
func (s Event) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Event) GoString() string {
return s.String()
}
// SetDate sets the Date field's value.
func (s *Event) SetDate(v time.Time) *Event {
s.Date = &v
return s
}
// SetMessage sets the Message field's value.
func (s *Event) SetMessage(v string) *Event {
s.Message = &v
return s
}
// SetSourceIdentifier sets the SourceIdentifier field's value.
func (s *Event) SetSourceIdentifier(v string) *Event {
s.SourceIdentifier = &v
return s
}
// SetSourceType sets the SourceType field's value.
func (s *Event) SetSourceType(v string) *Event {
s.SourceType = &v
return s
}
// The input parameters for the ListAllowedNodeTypeModifications operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListAllowedNodeTypeModificationsMessage
type ListAllowedNodeTypeModificationsInput struct {
_ struct{} `type:"structure"`
// The name of the cache cluster you want to scale up to a larger node instance
// type. ElastiCache uses the cluster id to identify the current node type of
// this cluster and from that to create a list of node types you can scale up
// to.
//
// You must provide a value for either the CacheClusterId or the ReplicationGroupId.
CacheClusterId *string `type:"string"`
// The name of the replication group you want to scale up to a larger node type.
// ElastiCache uses the replication group id to identify the current node type
// being used by this replication group, and from that to create a list of node
// types you can scale up to.
//
// You must provide a value for either the CacheClusterId or the ReplicationGroupId.
ReplicationGroupId *string `type:"string"`
}
// String returns the string representation
func (s ListAllowedNodeTypeModificationsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListAllowedNodeTypeModificationsInput) GoString() string {
return s.String()
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *ListAllowedNodeTypeModificationsInput) SetCacheClusterId(v string) *ListAllowedNodeTypeModificationsInput {
s.CacheClusterId = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *ListAllowedNodeTypeModificationsInput) SetReplicationGroupId(v string) *ListAllowedNodeTypeModificationsInput {
s.ReplicationGroupId = &v
return s
}
// Represents the allowed node types you can use to modify your cache cluster
// or replication group.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/AllowedNodeTypeModificationsMessage
type ListAllowedNodeTypeModificationsOutput struct {
_ struct{} `type:"structure"`
// A string list, each element of which specifies a cache node type which you
// can use to scale your cache cluster or replication group.
//
// When scaling up a Redis cluster or replication group using ModifyCacheCluster
// or ModifyReplicationGroup, use a value from this list for the CacheNodeType
// parameter.
ScaleUpModifications []*string `type:"list"`
}
// String returns the string representation
func (s ListAllowedNodeTypeModificationsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListAllowedNodeTypeModificationsOutput) GoString() string {
return s.String()
}
// SetScaleUpModifications sets the ScaleUpModifications field's value.
func (s *ListAllowedNodeTypeModificationsOutput) SetScaleUpModifications(v []*string) *ListAllowedNodeTypeModificationsOutput {
s.ScaleUpModifications = v
return s
}
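// allowedScaleUps is an illustrative sketch, not part of the generated API.
// Exactly one of CacheClusterId or ReplicationGroupId should be supplied;
// here the cluster id is used, so ElastiCache derives the current node type
// from that cluster and returns the node types it can scale up to. It
// assumes c is a configured ElastiCache client from this package.
func allowedScaleUps(c *ElastiCache, clusterID string) ([]*string, error) {
	in := &ListAllowedNodeTypeModificationsInput{}
	in.SetCacheClusterId(clusterID) // or SetReplicationGroupId, but one is required
	out, err := c.ListAllowedNodeTypeModifications(in)
	if err != nil {
		return nil, err
	}
	// Each element is a CacheNodeType usable with ModifyCacheCluster or
	// ModifyReplicationGroup.
	return out.ScaleUpModifications, nil
}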
// The input parameters for the ListTagsForResource operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ListTagsForResourceMessage
type ListTagsForResourceInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the resource for which you want the list
// of tags, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
// or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.
//
// For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
// Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// ResourceName is a required field
ResourceName *string `type:"string" required:"true"`
}
// String returns the string representation
func (s ListTagsForResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ListTagsForResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ListTagsForResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"}
if s.ResourceName == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceName sets the ResourceName field's value.
func (s *ListTagsForResourceInput) SetResourceName(v string) *ListTagsForResourceInput {
s.ResourceName = &v
return s
}
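// validateTagsRequest is an illustrative sketch, not part of the generated
// API, of the Validate convention used throughout this file: a zero-value
// input fails because ResourceName is required, and validation passes once
// an ARN is set. The arn argument is assumed to look like the cluster or
// snapshot ARN examples documented above.
func validateTagsRequest(arn string) error {
	in := &ListTagsForResourceInput{}
	// With ResourceName unset, Validate reports the missing required field.
	if err := in.Validate(); err != nil {
		in.SetResourceName(arn)
	}
	return in.Validate() // nil once the required field is present
}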
// Represents the input of a ModifyCacheCluster operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheClusterMessage
type ModifyCacheClusterInput struct {
_ struct{} `type:"structure"`
// Specifies whether the new nodes in this Memcached cache cluster are all created
// in a single Availability Zone or created across multiple Availability Zones.
//
// Valid values: single-az | cross-az.
//
// This option is only supported for Memcached cache clusters.
//
// You cannot specify single-az if the Memcached cache cluster already has cache
// nodes in different Availability Zones. If cross-az is specified, existing
// Memcached nodes remain in their current Availability Zone.
//
// Only newly created nodes are located in different Availability Zones. For
// instructions on how to move existing Memcached nodes to different Availability
// Zones, see the Availability Zone Considerations section of Cache Node Considerations
// for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
AZMode *string `type:"string" enum:"AZMode"`
// If true, this parameter causes the modifications in this request and any
// pending modifications to be applied, asynchronously and as soon as possible,
// regardless of the PreferredMaintenanceWindow setting for the cache cluster.
//
// If false, changes to the cache cluster are applied on the next maintenance
// reboot, or the next failure reboot, whichever occurs first.
//
// If you perform a ModifyCacheCluster before a pending modification is applied,
// the pending modification is replaced by the newer modification.
//
// Valid values: true | false
//
// Default: false
ApplyImmediately *bool `type:"boolean"`
// This parameter is currently disabled.
AutoMinorVersionUpgrade *bool `type:"boolean"`
// The cache cluster identifier. This value is stored as a lowercase string.
//
// CacheClusterId is a required field
CacheClusterId *string `type:"string" required:"true"`
// A list of cache node IDs to be removed. A node ID is a numeric identifier
// (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less
// than the existing number of cache nodes. The number of cache node IDs supplied
// in this parameter must match the difference between the existing number of
// cache nodes in the cluster or pending cache nodes, whichever is greater,
// and the value of NumCacheNodes in the request.
//
// For example: If you have 3 active cache nodes, 7 pending cache nodes, and
// the number of cache nodes in this ModifyCacheCluster call is 5, you must list
// 2 (7 - 5) cache node IDs to remove.
CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"`
// A valid cache node type that you want to scale this cache cluster up to.
CacheNodeType *string `type:"string"`
// The name of the cache parameter group to apply to this cache cluster. This
// change is asynchronously applied as soon as possible for parameters when
// the ApplyImmediately parameter is specified as true for this request.
CacheParameterGroupName *string `type:"string"`
// A list of cache security group names to authorize on this cache cluster.
// This change is asynchronously applied as soon as possible.
//
// You can use this parameter only with clusters that are created outside of
// an Amazon Virtual Private Cloud (Amazon VPC).
//
// Constraints: Must contain no more than 255 alphanumeric characters. Must
// not be "Default".
CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
// The upgraded version of the cache engine to be run on the cache nodes.
//
// Important: You can upgrade to a newer engine version (see Selecting a Cache
// Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)),
// but you cannot downgrade to an earlier engine version. If you want to use
// an earlier engine version, you must delete the existing cache cluster and
// create it anew with the earlier engine version.
EngineVersion *string `type:"string"`
// The list of Availability Zones where the new Memcached cache nodes are created.
//
// This parameter is only valid when NumCacheNodes in the request is greater
// than the sum of the number of active cache nodes and the number of cache
// nodes pending creation (which may be zero). The number of Availability Zones
// supplied in this list must match the cache nodes being added in this request.
//
// This option is only supported on Memcached clusters.
//
// Scenarios:
//
// * Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify
// NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones
// for the two new nodes.
//
// * Scenario 2: You have 3 active nodes and 2 nodes pending creation (from
// the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6
// ((3 + 2) + 1) and optionally specify an Availability Zone for the new
// node.
//
// * Scenario 3: You want to cancel all pending operations. Specify NumCacheNodes=3
// to cancel all pending operations.
//
// The Availability Zone placement of nodes pending creation cannot be modified.
// If you wish to cancel any nodes pending creation, add 0 nodes by setting
// NumCacheNodes to the number of current nodes.
//
// If cross-az is specified, existing Memcached nodes remain in their current
// Availability Zone. Only newly created nodes can be located in different Availability
// Zones. For guidance on how to move existing Memcached nodes to different
// Availability Zones, see the Availability Zone Considerations section of Cache
// Node Considerations for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html).
//
// Impact of new add/remove requests upon pending requests
//
// * Scenario-1
//
// Pending Action: Delete
//
// New Request: Delete
//
// Result: The new delete, pending or immediate, replaces the pending delete.
//
// * Scenario-2
//
// Pending Action: Delete
//
// New Request: Create
//
// Result: The new create, pending or immediate, replaces the pending delete.
//
// * Scenario-3
//
// Pending Action: Create
//
// New Request: Delete
//
// Result: The new delete, pending or immediate, replaces the pending create.
//
// * Scenario-4
//
// Pending Action: Create
//
// New Request: Create
//
// Result: The new create is added to the pending create.
//
// Important: If the new create request is Apply Immediately - Yes, all creates
// are performed immediately. If the new create request is Apply Immediately
// - No, all creates are pending.
NewAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"`
// The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications
// are sent.
//
// The Amazon SNS topic owner must be same as the cache cluster owner.
NotificationTopicArn *string `type:"string"`
// The status of the Amazon SNS notification topic. Notifications are sent only
// if the status is active.
//
// Valid values: active | inactive
NotificationTopicStatus *string `type:"string"`
// The number of cache nodes that the cache cluster should have. If the value
// for NumCacheNodes is greater than the sum of the number of current cache
// nodes and the number of cache nodes pending creation (which may be zero),
// more nodes are added. If the value is less than the number of existing cache
// nodes, nodes are removed. If the value is equal to the number of current
// cache nodes, any pending add or remove requests are canceled.
//
// If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter
// to provide the IDs of the specific cache nodes to remove.
//
// For clusters running Redis, this value must be 1. For clusters running Memcached,
// this value must be between 1 and 20.
//
// Adding or removing Memcached cache nodes can be applied immediately or as
// a pending operation (see ApplyImmediately).
//
// A pending operation to modify the number of cache nodes in a cluster during
// its maintenance window, whether by adding or removing nodes in accordance
// with the scale out architecture, is not queued. The customer's latest request
// to add or remove nodes to the cluster overrides any previous pending operations
// to modify the number of cache nodes in the cluster. For example, a request
// to remove 2 nodes would override a previous pending operation to remove 3
// nodes. Similarly, a request to add 2 nodes would override a previous pending
// operation to remove 3 nodes and vice versa. As Memcached cache nodes may
// now be provisioned in different Availability Zones with flexible cache node
// placement, a request to add nodes does not automatically override a previous
// pending operation to add nodes. The customer can modify the previous pending
// operation to add more nodes or explicitly cancel the pending request and
// retry the new request. To cancel pending operations to modify the number
// of cache nodes in a cluster, use the ModifyCacheCluster request and set NumCacheNodes
// equal to the number of cache nodes currently in the cache cluster.
NumCacheNodes *int64 `type:"integer"`
// Specifies the weekly time range during which maintenance on the cluster is
// performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
//
// Valid values for ddd are:
//
// * sun
//
// * mon
//
// * tue
//
// * wed
//
// * thu
//
// * fri
//
// * sat
//
// Example: sun:23:00-mon:01:30
PreferredMaintenanceWindow *string `type:"string"`
// Specifies the VPC Security Groups associated with the cache cluster.
//
// This parameter can be used only with clusters that are created in an Amazon
// Virtual Private Cloud (Amazon VPC).
SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
// The number of days for which ElastiCache retains automatic cache cluster
// snapshots before deleting them. For example, if you set SnapshotRetentionLimit
// to 5, a snapshot that was taken today is retained for 5 days before being
// deleted.
//
// If the value of SnapshotRetentionLimit is set to zero (0), backups are turned
// off.
SnapshotRetentionLimit *int64 `type:"integer"`
// The daily time range (in UTC) during which ElastiCache begins taking a daily
// snapshot of your cache cluster.
SnapshotWindow *string `type:"string"`
}
// String returns the string representation
func (s ModifyCacheClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyCacheClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ModifyCacheClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ModifyCacheClusterInput"}
if s.CacheClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetAZMode sets the AZMode field's value.
func (s *ModifyCacheClusterInput) SetAZMode(v string) *ModifyCacheClusterInput {
s.AZMode = &v
return s
}
// SetApplyImmediately sets the ApplyImmediately field's value.
func (s *ModifyCacheClusterInput) SetApplyImmediately(v bool) *ModifyCacheClusterInput {
s.ApplyImmediately = &v
return s
}
// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value.
func (s *ModifyCacheClusterInput) SetAutoMinorVersionUpgrade(v bool) *ModifyCacheClusterInput {
s.AutoMinorVersionUpgrade = &v
return s
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *ModifyCacheClusterInput) SetCacheClusterId(v string) *ModifyCacheClusterInput {
s.CacheClusterId = &v
return s
}
// SetCacheNodeIdsToRemove sets the CacheNodeIdsToRemove field's value.
func (s *ModifyCacheClusterInput) SetCacheNodeIdsToRemove(v []*string) *ModifyCacheClusterInput {
s.CacheNodeIdsToRemove = v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *ModifyCacheClusterInput) SetCacheNodeType(v string) *ModifyCacheClusterInput {
s.CacheNodeType = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *ModifyCacheClusterInput) SetCacheParameterGroupName(v string) *ModifyCacheClusterInput {
s.CacheParameterGroupName = &v
return s
}
// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value.
func (s *ModifyCacheClusterInput) SetCacheSecurityGroupNames(v []*string) *ModifyCacheClusterInput {
s.CacheSecurityGroupNames = v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *ModifyCacheClusterInput) SetEngineVersion(v string) *ModifyCacheClusterInput {
s.EngineVersion = &v
return s
}
// SetNewAvailabilityZones sets the NewAvailabilityZones field's value.
func (s *ModifyCacheClusterInput) SetNewAvailabilityZones(v []*string) *ModifyCacheClusterInput {
s.NewAvailabilityZones = v
return s
}
// SetNotificationTopicArn sets the NotificationTopicArn field's value.
func (s *ModifyCacheClusterInput) SetNotificationTopicArn(v string) *ModifyCacheClusterInput {
s.NotificationTopicArn = &v
return s
}
// SetNotificationTopicStatus sets the NotificationTopicStatus field's value.
func (s *ModifyCacheClusterInput) SetNotificationTopicStatus(v string) *ModifyCacheClusterInput {
s.NotificationTopicStatus = &v
return s
}
// SetNumCacheNodes sets the NumCacheNodes field's value.
func (s *ModifyCacheClusterInput) SetNumCacheNodes(v int64) *ModifyCacheClusterInput {
s.NumCacheNodes = &v
return s
}
// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
func (s *ModifyCacheClusterInput) SetPreferredMaintenanceWindow(v string) *ModifyCacheClusterInput {
s.PreferredMaintenanceWindow = &v
return s
}
// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *ModifyCacheClusterInput) SetSecurityGroupIds(v []*string) *ModifyCacheClusterInput {
s.SecurityGroupIds = v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *ModifyCacheClusterInput) SetSnapshotRetentionLimit(v int64) *ModifyCacheClusterInput {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *ModifyCacheClusterInput) SetSnapshotWindow(v string) *ModifyCacheClusterInput {
s.SnapshotWindow = &v
return s
}
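// growMemcachedCluster is an illustrative sketch, not part of the generated
// API, of the NumCacheNodes arithmetic documented above: with 3 active
// nodes, requesting 5 adds 2, and the two new nodes are placed in explicit
// Availability Zones. The zone names are assumptions; ApplyImmediately
// applies the change without waiting for the maintenance window.
func growMemcachedCluster(clusterID string) *ModifyCacheClusterInput {
	az1, az2 := "us-west-2a", "us-west-2b"
	in := &ModifyCacheClusterInput{}
	in.SetCacheClusterId(clusterID).
		SetNumCacheNodes(5). // 3 existing + 2 new
		SetNewAvailabilityZones([]*string{&az1, &az2}). // one zone per new node
		SetApplyImmediately(true)
	return in
}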
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheClusterResult
type ModifyCacheClusterOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific cache cluster.
CacheCluster *CacheCluster `type:"structure"`
}
// String returns the string representation
func (s ModifyCacheClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyCacheClusterOutput) GoString() string {
return s.String()
}
// SetCacheCluster sets the CacheCluster field's value.
func (s *ModifyCacheClusterOutput) SetCacheCluster(v *CacheCluster) *ModifyCacheClusterOutput {
s.CacheCluster = v
return s
}
// Represents the input of a ModifyCacheParameterGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheParameterGroupMessage
type ModifyCacheParameterGroupInput struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group to modify.
//
// CacheParameterGroupName is a required field
CacheParameterGroupName *string `type:"string" required:"true"`
// An array of parameter names and values for the parameter update. You must
// supply at least one parameter name and value; subsequent arguments are optional.
// A maximum of 20 parameters may be modified per request.
//
// ParameterNameValues is a required field
ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"`
}
// String returns the string representation
func (s ModifyCacheParameterGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyCacheParameterGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ModifyCacheParameterGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ModifyCacheParameterGroupInput"}
if s.CacheParameterGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
}
if s.ParameterNameValues == nil {
invalidParams.Add(request.NewErrParamRequired("ParameterNameValues"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *ModifyCacheParameterGroupInput) SetCacheParameterGroupName(v string) *ModifyCacheParameterGroupInput {
s.CacheParameterGroupName = &v
return s
}
// SetParameterNameValues sets the ParameterNameValues field's value.
func (s *ModifyCacheParameterGroupInput) SetParameterNameValues(v []*ParameterNameValue) *ModifyCacheParameterGroupInput {
s.ParameterNameValues = v
return s
}
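// setEvictionPolicy is an illustrative sketch, not part of the generated
// API: ParameterNameValue pairs are batched into a single request (at most
// 20 per the doc above). The Redis parameter name and value here are
// assumptions used purely for illustration.
func setEvictionPolicy(groupName string) *ModifyCacheParameterGroupInput {
	pnv := &ParameterNameValue{}
	pnv.SetParameterName("maxmemory-policy").SetParameterValue("allkeys-lru")
	in := &ModifyCacheParameterGroupInput{}
	in.SetCacheParameterGroupName(groupName).
		SetParameterNameValues([]*ParameterNameValue{pnv})
	return in
}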
// Represents the input of a ModifyCacheSubnetGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroupMessage
type ModifyCacheSubnetGroupInput struct {
_ struct{} `type:"structure"`
// A description of the cache subnet group.
CacheSubnetGroupDescription *string `type:"string"`
// The name for the cache subnet group. This value is stored as a lowercase
// string.
//
// Constraints: Must contain no more than 255 alphanumeric characters or hyphens.
//
// Example: mysubnetgroup
//
// CacheSubnetGroupName is a required field
CacheSubnetGroupName *string `type:"string" required:"true"`
// The EC2 subnet IDs for the cache subnet group.
SubnetIds []*string `locationNameList:"SubnetIdentifier" type:"list"`
}
// String returns the string representation
func (s ModifyCacheSubnetGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyCacheSubnetGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ModifyCacheSubnetGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ModifyCacheSubnetGroupInput"}
if s.CacheSubnetGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSubnetGroupName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSubnetGroupDescription sets the CacheSubnetGroupDescription field's value.
func (s *ModifyCacheSubnetGroupInput) SetCacheSubnetGroupDescription(v string) *ModifyCacheSubnetGroupInput {
s.CacheSubnetGroupDescription = &v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *ModifyCacheSubnetGroupInput) SetCacheSubnetGroupName(v string) *ModifyCacheSubnetGroupInput {
s.CacheSubnetGroupName = &v
return s
}
// SetSubnetIds sets the SubnetIds field's value.
func (s *ModifyCacheSubnetGroupInput) SetSubnetIds(v []*string) *ModifyCacheSubnetGroupInput {
s.SubnetIds = v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyCacheSubnetGroupResult
type ModifyCacheSubnetGroupOutput struct {
_ struct{} `type:"structure"`
// Represents the output of one of the following operations:
//
// * CreateCacheSubnetGroup
//
// * ModifyCacheSubnetGroup
CacheSubnetGroup *CacheSubnetGroup `type:"structure"`
}
// String returns the string representation
func (s ModifyCacheSubnetGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyCacheSubnetGroupOutput) GoString() string {
return s.String()
}
// SetCacheSubnetGroup sets the CacheSubnetGroup field's value.
func (s *ModifyCacheSubnetGroupOutput) SetCacheSubnetGroup(v *CacheSubnetGroup) *ModifyCacheSubnetGroupOutput {
s.CacheSubnetGroup = v
return s
}
// Represents the input of a ModifyReplicationGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroupMessage
type ModifyReplicationGroupInput struct {
_ struct{} `type:"structure"`
// If true, this parameter causes the modifications in this request and any
// pending modifications to be applied, asynchronously and as soon as possible,
// regardless of the PreferredMaintenanceWindow setting for the replication
// group.
//
// If false, changes to the nodes in the replication group are applied on the
// next maintenance reboot, or the next failure reboot, whichever occurs first.
//
// Valid values: true | false
//
// Default: false
ApplyImmediately *bool `type:"boolean"`
// This parameter is currently disabled.
AutoMinorVersionUpgrade *bool `type:"boolean"`
// Determines whether a read replica is automatically promoted to read/write
// primary if the existing primary encounters a failure.
//
// Valid values: true | false
//
// ElastiCache Multi-AZ replication groups are not supported on:
//
// Redis versions earlier than 2.8.6.
//
// Redis (cluster mode disabled):T1 and T2 cache node types.
//
// Redis (cluster mode enabled): T1 node types.
AutomaticFailoverEnabled *bool `type:"boolean"`
// A valid cache node type that you want to scale this replication group to.
CacheNodeType *string `type:"string"`
// The name of the cache parameter group to apply to all of the clusters in
// this replication group. This change is asynchronously applied as soon as
// possible for parameters when the ApplyImmediately parameter is specified
// as true for this request.
CacheParameterGroupName *string `type:"string"`
// A list of cache security group names to authorize for the clusters in this
// replication group. This change is asynchronously applied as soon as possible.
//
// This parameter can be used only with replication groups containing cache clusters
// running outside of an Amazon Virtual Private Cloud (Amazon VPC).
//
// Constraints: Must contain no more than 255 alphanumeric characters. Must
// not be Default.
CacheSecurityGroupNames []*string `locationNameList:"CacheSecurityGroupName" type:"list"`
// The upgraded version of the cache engine to be run on the cache clusters
// in the replication group.
//
// Important: You can upgrade to a newer engine version (see Selecting a Cache
// Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)),
// but you cannot downgrade to an earlier engine version. If you want to use
// an earlier engine version, you must delete the existing replication group
// and create it anew with the earlier engine version.
EngineVersion *string `type:"string"`
// The name of the Node Group (called shard in the console).
NodeGroupId *string `type:"string"`
// The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications
// are sent.
//
// The Amazon SNS topic owner must be same as the replication group owner.
NotificationTopicArn *string `type:"string"`
// The status of the Amazon SNS notification topic for the replication group.
// Notifications are sent only if the status is active.
//
// Valid values: active | inactive
NotificationTopicStatus *string `type:"string"`
// Specifies the weekly time range during which maintenance on the cluster is
// performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
//
// Valid values for ddd are:
//
// * sun
//
// * mon
//
// * tue
//
// * wed
//
// * thu
//
// * fri
//
// * sat
//
// Example: sun:23:00-mon:01:30
PreferredMaintenanceWindow *string `type:"string"`
// For replication groups with a single primary, if this parameter is specified,
// ElastiCache promotes the specified cluster in the specified replication group
// to the primary role. The nodes of all other clusters in the replication group
// are read replicas.
PrimaryClusterId *string `type:"string"`
// A description for the replication group. Maximum length is 255 characters.
ReplicationGroupDescription *string `type:"string"`
// The identifier of the replication group to modify.
//
// ReplicationGroupId is a required field
ReplicationGroupId *string `type:"string" required:"true"`
// Specifies the VPC Security Groups associated with the cache clusters in the
// replication group.
//
// This parameter can be used only with replication groups containing cache clusters
// running in an Amazon Virtual Private Cloud (Amazon VPC).
SecurityGroupIds []*string `locationNameList:"SecurityGroupId" type:"list"`
// The number of days for which ElastiCache retains automatic node group (shard)
// snapshots before deleting them. For example, if you set SnapshotRetentionLimit
// to 5, a snapshot that was taken today is retained for 5 days before being
// deleted.
//
// Important If the value of SnapshotRetentionLimit is set to zero (0), backups
// are turned off.
SnapshotRetentionLimit *int64 `type:"integer"`
// The daily time range (in UTC) during which ElastiCache begins taking a daily
// snapshot of the node group (shard) specified by SnapshottingClusterId.
//
// Example: 05:00-09:00
//
// If you do not specify this parameter, ElastiCache automatically chooses an
// appropriate time range.
SnapshotWindow *string `type:"string"`
// The cache cluster ID that is used as the daily snapshot source for the replication
// group. This parameter cannot be set for Redis (cluster mode enabled) replication
// groups.
SnapshottingClusterId *string `type:"string"`
}
// String returns the string representation
func (s ModifyReplicationGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyReplicationGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ModifyReplicationGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ModifyReplicationGroupInput"}
if s.ReplicationGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetApplyImmediately sets the ApplyImmediately field's value.
func (s *ModifyReplicationGroupInput) SetApplyImmediately(v bool) *ModifyReplicationGroupInput {
s.ApplyImmediately = &v
return s
}
// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value.
func (s *ModifyReplicationGroupInput) SetAutoMinorVersionUpgrade(v bool) *ModifyReplicationGroupInput {
s.AutoMinorVersionUpgrade = &v
return s
}
// SetAutomaticFailoverEnabled sets the AutomaticFailoverEnabled field's value.
func (s *ModifyReplicationGroupInput) SetAutomaticFailoverEnabled(v bool) *ModifyReplicationGroupInput {
s.AutomaticFailoverEnabled = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *ModifyReplicationGroupInput) SetCacheNodeType(v string) *ModifyReplicationGroupInput {
s.CacheNodeType = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *ModifyReplicationGroupInput) SetCacheParameterGroupName(v string) *ModifyReplicationGroupInput {
s.CacheParameterGroupName = &v
return s
}
// SetCacheSecurityGroupNames sets the CacheSecurityGroupNames field's value.
func (s *ModifyReplicationGroupInput) SetCacheSecurityGroupNames(v []*string) *ModifyReplicationGroupInput {
s.CacheSecurityGroupNames = v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *ModifyReplicationGroupInput) SetEngineVersion(v string) *ModifyReplicationGroupInput {
s.EngineVersion = &v
return s
}
// SetNodeGroupId sets the NodeGroupId field's value.
func (s *ModifyReplicationGroupInput) SetNodeGroupId(v string) *ModifyReplicationGroupInput {
s.NodeGroupId = &v
return s
}
// SetNotificationTopicArn sets the NotificationTopicArn field's value.
func (s *ModifyReplicationGroupInput) SetNotificationTopicArn(v string) *ModifyReplicationGroupInput {
s.NotificationTopicArn = &v
return s
}
// SetNotificationTopicStatus sets the NotificationTopicStatus field's value.
func (s *ModifyReplicationGroupInput) SetNotificationTopicStatus(v string) *ModifyReplicationGroupInput {
s.NotificationTopicStatus = &v
return s
}
// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
func (s *ModifyReplicationGroupInput) SetPreferredMaintenanceWindow(v string) *ModifyReplicationGroupInput {
s.PreferredMaintenanceWindow = &v
return s
}
// SetPrimaryClusterId sets the PrimaryClusterId field's value.
func (s *ModifyReplicationGroupInput) SetPrimaryClusterId(v string) *ModifyReplicationGroupInput {
s.PrimaryClusterId = &v
return s
}
// SetReplicationGroupDescription sets the ReplicationGroupDescription field's value.
func (s *ModifyReplicationGroupInput) SetReplicationGroupDescription(v string) *ModifyReplicationGroupInput {
s.ReplicationGroupDescription = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *ModifyReplicationGroupInput) SetReplicationGroupId(v string) *ModifyReplicationGroupInput {
s.ReplicationGroupId = &v
return s
}
// SetSecurityGroupIds sets the SecurityGroupIds field's value.
func (s *ModifyReplicationGroupInput) SetSecurityGroupIds(v []*string) *ModifyReplicationGroupInput {
s.SecurityGroupIds = v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *ModifyReplicationGroupInput) SetSnapshotRetentionLimit(v int64) *ModifyReplicationGroupInput {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *ModifyReplicationGroupInput) SetSnapshotWindow(v string) *ModifyReplicationGroupInput {
s.SnapshotWindow = &v
return s
}
// SetSnapshottingClusterId sets the SnapshottingClusterId field's value.
func (s *ModifyReplicationGroupInput) SetSnapshottingClusterId(v string) *ModifyReplicationGroupInput {
s.SnapshottingClusterId = &v
return s
}
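// promoteReplica is an illustrative sketch, not part of the generated API:
// for a replication group with a single primary, it promotes the named
// cluster to the primary role immediately, demoting the other clusters to
// read replicas, rather than waiting for the maintenance window.
func promoteReplica(groupID, newPrimaryID string) *ModifyReplicationGroupInput {
	in := &ModifyReplicationGroupInput{}
	in.SetReplicationGroupId(groupID). // the only required field (see Validate)
		SetPrimaryClusterId(newPrimaryID).
		SetApplyImmediately(true)
	return in
}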
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ModifyReplicationGroupResult
type ModifyReplicationGroupOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific Redis replication group.
ReplicationGroup *ReplicationGroup `type:"structure"`
}
// String returns the string representation
func (s ModifyReplicationGroupOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ModifyReplicationGroupOutput) GoString() string {
return s.String()
}
// SetReplicationGroup sets the ReplicationGroup field's value.
func (s *ModifyReplicationGroupOutput) SetReplicationGroup(v *ReplicationGroup) *ModifyReplicationGroupOutput {
s.ReplicationGroup = v
return s
}
// Represents a collection of cache nodes in a replication group. One node in
// the node group is the read/write primary node. All the other nodes are read-only
// replica nodes.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/NodeGroup
type NodeGroup struct {
_ struct{} `type:"structure"`
// The identifier for the node group (shard). A Redis (cluster mode disabled)
// replication group contains only 1 node group; therefore, the node group ID
// is 0001. A Redis (cluster mode enabled) replication group contains 1 to 15
// node groups numbered 0001 to 0015.
NodeGroupId *string `type:"string"`
// A list containing information about individual nodes within the node group
// (shard).
NodeGroupMembers []*NodeGroupMember `locationNameList:"NodeGroupMember" type:"list"`
// The endpoint of the primary node in this node group (shard).
PrimaryEndpoint *Endpoint `type:"structure"`
// The keyspace for this node group (shard).
Slots *string `type:"string"`
// The current state of this replication group - creating, available, etc.
Status *string `type:"string"`
}
// String returns the string representation
func (s NodeGroup) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NodeGroup) GoString() string {
return s.String()
}
// SetNodeGroupId sets the NodeGroupId field's value.
func (s *NodeGroup) SetNodeGroupId(v string) *NodeGroup {
s.NodeGroupId = &v
return s
}
// SetNodeGroupMembers sets the NodeGroupMembers field's value.
func (s *NodeGroup) SetNodeGroupMembers(v []*NodeGroupMember) *NodeGroup {
s.NodeGroupMembers = v
return s
}
// SetPrimaryEndpoint sets the PrimaryEndpoint field's value.
func (s *NodeGroup) SetPrimaryEndpoint(v *Endpoint) *NodeGroup {
s.PrimaryEndpoint = v
return s
}
// SetSlots sets the Slots field's value.
func (s *NodeGroup) SetSlots(v string) *NodeGroup {
s.Slots = &v
return s
}
// SetStatus sets the Status field's value.
func (s *NodeGroup) SetStatus(v string) *NodeGroup {
s.Status = &v
return s
}
// Node group (shard) configuration options. Each node group (shard) configuration
// has the following: Slots, PrimaryAvailabilityZone, ReplicaAvailabilityZones,
// ReplicaCount.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/NodeGroupConfiguration
type NodeGroupConfiguration struct {
_ struct{} `type:"structure"`
// The Availability Zone where the primary node of this node group (shard) is
// launched.
PrimaryAvailabilityZone *string `type:"string"`
// A list of Availability Zones to be used for the read replicas. The number
// of Availability Zones in this list must match the value of ReplicaCount or
// ReplicasPerNodeGroup if not specified.
ReplicaAvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"`
// The number of read replica nodes in this node group (shard).
ReplicaCount *int64 `type:"integer"`
// A string that specifies the keyspace for a particular node group. Keyspaces
// range from 0 to 16,383. The string is in the format startkey-endkey.
//
// Example: "0-3999"
Slots *string `type:"string"`
}
// String returns the string representation
func (s NodeGroupConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NodeGroupConfiguration) GoString() string {
return s.String()
}
// SetPrimaryAvailabilityZone sets the PrimaryAvailabilityZone field's value.
func (s *NodeGroupConfiguration) SetPrimaryAvailabilityZone(v string) *NodeGroupConfiguration {
s.PrimaryAvailabilityZone = &v
return s
}
// SetReplicaAvailabilityZones sets the ReplicaAvailabilityZones field's value.
func (s *NodeGroupConfiguration) SetReplicaAvailabilityZones(v []*string) *NodeGroupConfiguration {
s.ReplicaAvailabilityZones = v
return s
}
// SetReplicaCount sets the ReplicaCount field's value.
func (s *NodeGroupConfiguration) SetReplicaCount(v int64) *NodeGroupConfiguration {
s.ReplicaCount = &v
return s
}
// SetSlots sets the Slots field's value.
func (s *NodeGroupConfiguration) SetSlots(v string) *NodeGroupConfiguration {
s.Slots = &v
return s
}
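// shardConfig is an illustrative sketch, not part of the generated API, of
// one node group (shard) covering the first quarter of the 0-16383 keyspace,
// with a primary in one Availability Zone and two replicas spread across two
// others. The zone names are assumptions; note that ReplicaCount matches the
// number of replica zones listed, as the field doc requires.
func shardConfig() *NodeGroupConfiguration {
	azB, azC := "us-west-2b", "us-west-2c"
	cfg := &NodeGroupConfiguration{}
	cfg.SetSlots("0-4095"). // startkey-endkey format, e.g. "0-3999" in the doc
		SetPrimaryAvailabilityZone("us-west-2a").
		SetReplicaAvailabilityZones([]*string{&azB, &azC}).
		SetReplicaCount(2)
	return cfg
}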
// Represents a single node within a node group (shard).
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/NodeGroupMember
type NodeGroupMember struct {
_ struct{} `type:"structure"`
// The ID of the cache cluster to which the node belongs.
CacheClusterId *string `type:"string"`
// The ID of the node within its cache cluster. A node ID is a numeric identifier
// (0001, 0002, etc.).
CacheNodeId *string `type:"string"`
// The role that is currently assigned to the node - primary or replica.
CurrentRole *string `type:"string"`
// The name of the Availability Zone in which the node is located.
PreferredAvailabilityZone *string `type:"string"`
// Represents the information required for client programs to connect to a cache
// node.
ReadEndpoint *Endpoint `type:"structure"`
}
// String returns the string representation
func (s NodeGroupMember) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NodeGroupMember) GoString() string {
return s.String()
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *NodeGroupMember) SetCacheClusterId(v string) *NodeGroupMember {
s.CacheClusterId = &v
return s
}
// SetCacheNodeId sets the CacheNodeId field's value.
func (s *NodeGroupMember) SetCacheNodeId(v string) *NodeGroupMember {
s.CacheNodeId = &v
return s
}
// SetCurrentRole sets the CurrentRole field's value.
func (s *NodeGroupMember) SetCurrentRole(v string) *NodeGroupMember {
s.CurrentRole = &v
return s
}
// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value.
func (s *NodeGroupMember) SetPreferredAvailabilityZone(v string) *NodeGroupMember {
s.PreferredAvailabilityZone = &v
return s
}
// SetReadEndpoint sets the ReadEndpoint field's value.
func (s *NodeGroupMember) SetReadEndpoint(v *Endpoint) *NodeGroupMember {
s.ReadEndpoint = v
return s
}
// Represents an individual cache node in a snapshot of a cache cluster.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/NodeSnapshot
type NodeSnapshot struct {
_ struct{} `type:"structure"`
// A unique identifier for the source cache cluster.
CacheClusterId *string `type:"string"`
// The date and time when the cache node was created in the source cache cluster.
CacheNodeCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The cache node identifier for the node in the source cache cluster.
CacheNodeId *string `type:"string"`
// The size of the cache on the source cache node.
CacheSize *string `type:"string"`
// The configuration for the source node group (shard).
NodeGroupConfiguration *NodeGroupConfiguration `type:"structure"`
// A unique identifier for the source node group (shard).
NodeGroupId *string `type:"string"`
// The date and time when the source node's metadata and cache data set was
// obtained for the snapshot.
SnapshotCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
}
// String returns the string representation
func (s NodeSnapshot) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NodeSnapshot) GoString() string {
return s.String()
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *NodeSnapshot) SetCacheClusterId(v string) *NodeSnapshot {
s.CacheClusterId = &v
return s
}
// SetCacheNodeCreateTime sets the CacheNodeCreateTime field's value.
func (s *NodeSnapshot) SetCacheNodeCreateTime(v time.Time) *NodeSnapshot {
s.CacheNodeCreateTime = &v
return s
}
// SetCacheNodeId sets the CacheNodeId field's value.
func (s *NodeSnapshot) SetCacheNodeId(v string) *NodeSnapshot {
s.CacheNodeId = &v
return s
}
// SetCacheSize sets the CacheSize field's value.
func (s *NodeSnapshot) SetCacheSize(v string) *NodeSnapshot {
s.CacheSize = &v
return s
}
// SetNodeGroupConfiguration sets the NodeGroupConfiguration field's value.
func (s *NodeSnapshot) SetNodeGroupConfiguration(v *NodeGroupConfiguration) *NodeSnapshot {
s.NodeGroupConfiguration = v
return s
}
// SetNodeGroupId sets the NodeGroupId field's value.
func (s *NodeSnapshot) SetNodeGroupId(v string) *NodeSnapshot {
s.NodeGroupId = &v
return s
}
// SetSnapshotCreateTime sets the SnapshotCreateTime field's value.
func (s *NodeSnapshot) SetSnapshotCreateTime(v time.Time) *NodeSnapshot {
s.SnapshotCreateTime = &v
return s
}
// Describes a notification topic and its status. Notification topics are used
// for publishing ElastiCache events to subscribers using Amazon Simple Notification
// Service (SNS).
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/NotificationConfiguration
type NotificationConfiguration struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) that identifies the topic.
TopicArn *string `type:"string"`
// The current state of the topic.
TopicStatus *string `type:"string"`
}
// String returns the string representation
func (s NotificationConfiguration) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s NotificationConfiguration) GoString() string {
return s.String()
}
// SetTopicArn sets the TopicArn field's value.
func (s *NotificationConfiguration) SetTopicArn(v string) *NotificationConfiguration {
s.TopicArn = &v
return s
}
// SetTopicStatus sets the TopicStatus field's value.
func (s *NotificationConfiguration) SetTopicStatus(v string) *NotificationConfiguration {
s.TopicStatus = &v
return s
}
// Describes an individual setting that controls some aspect of ElastiCache
// behavior.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/Parameter
type Parameter struct {
_ struct{} `type:"structure"`
// The valid range of values for the parameter.
AllowedValues *string `type:"string"`
// Indicates whether a change to the parameter is applied immediately or requires
// a reboot for the change to be applied. You can force a reboot or wait until
// the next maintenance window's reboot. For more information, see Rebooting
// a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.html).
ChangeType *string `type:"string" enum:"ChangeType"`
// The valid data type for the parameter.
DataType *string `type:"string"`
// A description of the parameter.
Description *string `type:"string"`
// Indicates whether (true) or not (false) the parameter can be modified. Some
// parameters have security or operational implications that prevent them from
// being changed.
IsModifiable *bool `type:"boolean"`
// The earliest cache engine version to which the parameter can apply.
MinimumEngineVersion *string `type:"string"`
// The name of the parameter.
ParameterName *string `type:"string"`
// The value of the parameter.
ParameterValue *string `type:"string"`
// The source of the parameter.
Source *string `type:"string"`
}
// String returns the string representation
func (s Parameter) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Parameter) GoString() string {
return s.String()
}
// SetAllowedValues sets the AllowedValues field's value.
func (s *Parameter) SetAllowedValues(v string) *Parameter {
s.AllowedValues = &v
return s
}
// SetChangeType sets the ChangeType field's value.
func (s *Parameter) SetChangeType(v string) *Parameter {
s.ChangeType = &v
return s
}
// SetDataType sets the DataType field's value.
func (s *Parameter) SetDataType(v string) *Parameter {
s.DataType = &v
return s
}
// SetDescription sets the Description field's value.
func (s *Parameter) SetDescription(v string) *Parameter {
s.Description = &v
return s
}
// SetIsModifiable sets the IsModifiable field's value.
func (s *Parameter) SetIsModifiable(v bool) *Parameter {
s.IsModifiable = &v
return s
}
// SetMinimumEngineVersion sets the MinimumEngineVersion field's value.
func (s *Parameter) SetMinimumEngineVersion(v string) *Parameter {
s.MinimumEngineVersion = &v
return s
}
// SetParameterName sets the ParameterName field's value.
func (s *Parameter) SetParameterName(v string) *Parameter {
s.ParameterName = &v
return s
}
// SetParameterValue sets the ParameterValue field's value.
func (s *Parameter) SetParameterValue(v string) *Parameter {
s.ParameterValue = &v
return s
}
// SetSource sets the Source field's value.
func (s *Parameter) SetSource(v string) *Parameter {
s.Source = &v
return s
}
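// Illustrative sketch (not part of the generated API): inspecting whether a
// returned Parameter takes effect immediately or requires a reboot, using the
// ChangeType enum values defined near the bottom of this file.
func parameterNeedsReboot(p *Parameter) bool {
	return p.ChangeType != nil && *p.ChangeType == ChangeTypeRequiresReboot
}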
// Describes a name-value pair that is used to update the value of a parameter.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ParameterNameValue
type ParameterNameValue struct {
_ struct{} `type:"structure"`
// The name of the parameter.
ParameterName *string `type:"string"`
// The value of the parameter.
ParameterValue *string `type:"string"`
}
// String returns the string representation
func (s ParameterNameValue) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ParameterNameValue) GoString() string {
return s.String()
}
// SetParameterName sets the ParameterName field's value.
func (s *ParameterNameValue) SetParameterName(v string) *ParameterNameValue {
s.ParameterName = &v
return s
}
// SetParameterValue sets the ParameterValue field's value.
func (s *ParameterNameValue) SetParameterValue(v string) *ParameterNameValue {
s.ParameterValue = &v
return s
}
// A group of settings that are applied to the cache cluster in the future,
// or that are currently being applied.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PendingModifiedValues
type PendingModifiedValues struct {
_ struct{} `type:"structure"`
// A list of cache node IDs that are being removed (or will be removed) from
// the cache cluster. A node ID is a numeric identifier (0001, 0002, etc.).
CacheNodeIdsToRemove []*string `locationNameList:"CacheNodeId" type:"list"`
// The cache node type that this cache cluster or replication group is scaled
// to.
CacheNodeType *string `type:"string"`
// The new cache engine version that the cache cluster runs.
EngineVersion *string `type:"string"`
// The new number of cache nodes for the cache cluster.
//
// For clusters running Redis, this value must be 1. For clusters running Memcached,
// this value must be between 1 and 20.
NumCacheNodes *int64 `type:"integer"`
}
// String returns the string representation
func (s PendingModifiedValues) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PendingModifiedValues) GoString() string {
return s.String()
}
// SetCacheNodeIdsToRemove sets the CacheNodeIdsToRemove field's value.
func (s *PendingModifiedValues) SetCacheNodeIdsToRemove(v []*string) *PendingModifiedValues {
s.CacheNodeIdsToRemove = v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *PendingModifiedValues) SetCacheNodeType(v string) *PendingModifiedValues {
s.CacheNodeType = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *PendingModifiedValues) SetEngineVersion(v string) *PendingModifiedValues {
s.EngineVersion = &v
return s
}
// SetNumCacheNodes sets the NumCacheNodes field's value.
func (s *PendingModifiedValues) SetNumCacheNodes(v int64) *PendingModifiedValues {
s.NumCacheNodes = &v
return s
}
// Represents the input of a PurchaseReservedCacheNodesOffering operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOfferingMessage
type PurchaseReservedCacheNodesOfferingInput struct {
_ struct{} `type:"structure"`
// The number of cache node instances to reserve.
//
// Default: 1
CacheNodeCount *int64 `type:"integer"`
// A customer-specified identifier to track this reservation.
//
// The Reserved Cache Node ID is a unique customer-specified identifier to
// track this reservation. If this parameter is not specified, ElastiCache automatically
// generates an identifier for the reservation.
//
// Example: myreservationID
ReservedCacheNodeId *string `type:"string"`
// The ID of the reserved cache node offering to purchase.
//
// Example: 438012d3-4052-4cc7-b2e3-8d3372e0e706
//
// ReservedCacheNodesOfferingId is a required field
ReservedCacheNodesOfferingId *string `type:"string" required:"true"`
}
// String returns the string representation
func (s PurchaseReservedCacheNodesOfferingInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PurchaseReservedCacheNodesOfferingInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *PurchaseReservedCacheNodesOfferingInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "PurchaseReservedCacheNodesOfferingInput"}
if s.ReservedCacheNodesOfferingId == nil {
invalidParams.Add(request.NewErrParamRequired("ReservedCacheNodesOfferingId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheNodeCount sets the CacheNodeCount field's value.
func (s *PurchaseReservedCacheNodesOfferingInput) SetCacheNodeCount(v int64) *PurchaseReservedCacheNodesOfferingInput {
s.CacheNodeCount = &v
return s
}
// SetReservedCacheNodeId sets the ReservedCacheNodeId field's value.
func (s *PurchaseReservedCacheNodesOfferingInput) SetReservedCacheNodeId(v string) *PurchaseReservedCacheNodesOfferingInput {
s.ReservedCacheNodeId = &v
return s
}
// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value.
func (s *PurchaseReservedCacheNodesOfferingInput) SetReservedCacheNodesOfferingId(v string) *PurchaseReservedCacheNodesOfferingInput {
s.ReservedCacheNodesOfferingId = &v
return s
}
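// Illustrative sketch (not part of the generated API): building the input
// with the fluent setters and checking required fields with Validate. The
// offering ID is the documentation example above, not a live offering.
func examplePurchaseReservedCacheNodesOfferingInput() error {
	input := (&PurchaseReservedCacheNodesOfferingInput{}).
		SetReservedCacheNodesOfferingId("438012d3-4052-4cc7-b2e3-8d3372e0e706").
		SetCacheNodeCount(1)
	// Validate returns nil here; omitting the offering ID would instead yield
	// an ErrInvalidParams naming ReservedCacheNodesOfferingId.
	return input.Validate()
}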
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/PurchaseReservedCacheNodesOfferingResult
type PurchaseReservedCacheNodesOfferingOutput struct {
_ struct{} `type:"structure"`
// Represents the output of a PurchaseReservedCacheNodesOffering operation.
ReservedCacheNode *ReservedCacheNode `type:"structure"`
}
// String returns the string representation
func (s PurchaseReservedCacheNodesOfferingOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s PurchaseReservedCacheNodesOfferingOutput) GoString() string {
return s.String()
}
// SetReservedCacheNode sets the ReservedCacheNode field's value.
func (s *PurchaseReservedCacheNodesOfferingOutput) SetReservedCacheNode(v *ReservedCacheNode) *PurchaseReservedCacheNodesOfferingOutput {
s.ReservedCacheNode = v
return s
}
// Represents the input of a RebootCacheCluster operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheClusterMessage
type RebootCacheClusterInput struct {
_ struct{} `type:"structure"`
// The cache cluster identifier. This parameter is stored as a lowercase string.
//
// CacheClusterId is a required field
CacheClusterId *string `type:"string" required:"true"`
// A list of cache node IDs to reboot. A node ID is a numeric identifier (0001,
// 0002, etc.). To reboot an entire cache cluster, specify all of the cache
// node IDs.
//
// CacheNodeIdsToReboot is a required field
CacheNodeIdsToReboot []*string `locationNameList:"CacheNodeId" type:"list" required:"true"`
}
// String returns the string representation
func (s RebootCacheClusterInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RebootCacheClusterInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RebootCacheClusterInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RebootCacheClusterInput"}
if s.CacheClusterId == nil {
invalidParams.Add(request.NewErrParamRequired("CacheClusterId"))
}
if s.CacheNodeIdsToReboot == nil {
invalidParams.Add(request.NewErrParamRequired("CacheNodeIdsToReboot"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *RebootCacheClusterInput) SetCacheClusterId(v string) *RebootCacheClusterInput {
s.CacheClusterId = &v
return s
}
// SetCacheNodeIdsToReboot sets the CacheNodeIdsToReboot field's value.
func (s *RebootCacheClusterInput) SetCacheNodeIdsToReboot(v []*string) *RebootCacheClusterInput {
s.CacheNodeIdsToReboot = v
return s
}
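// Illustrative sketch (not part of the generated API): both fields of
// RebootCacheClusterInput are required, so Validate reports an error until a
// cluster ID and the node ID list are set. The values below are hypothetical.
func exampleRebootCacheClusterInput() error {
	nodeID := "0001"
	input := (&RebootCacheClusterInput{}).
		SetCacheClusterId("my-cluster").
		SetCacheNodeIdsToReboot([]*string{&nodeID})
	return input.Validate()
}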
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RebootCacheClusterResult
type RebootCacheClusterOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific cache cluster.
CacheCluster *CacheCluster `type:"structure"`
}
// String returns the string representation
func (s RebootCacheClusterOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RebootCacheClusterOutput) GoString() string {
return s.String()
}
// SetCacheCluster sets the CacheCluster field's value.
func (s *RebootCacheClusterOutput) SetCacheCluster(v *CacheCluster) *RebootCacheClusterOutput {
s.CacheCluster = v
return s
}
// Contains the specific price and frequency of a recurring charge for a reserved
// cache node, or for a reserved cache node offering.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RecurringCharge
type RecurringCharge struct {
_ struct{} `type:"structure"`
// The monetary amount of the recurring charge.
RecurringChargeAmount *float64 `type:"double"`
// The frequency of the recurring charge.
RecurringChargeFrequency *string `type:"string"`
}
// String returns the string representation
func (s RecurringCharge) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RecurringCharge) GoString() string {
return s.String()
}
// SetRecurringChargeAmount sets the RecurringChargeAmount field's value.
func (s *RecurringCharge) SetRecurringChargeAmount(v float64) *RecurringCharge {
s.RecurringChargeAmount = &v
return s
}
// SetRecurringChargeFrequency sets the RecurringChargeFrequency field's value.
func (s *RecurringCharge) SetRecurringChargeFrequency(v string) *RecurringCharge {
s.RecurringChargeFrequency = &v
return s
}
// Represents the input of a RemoveTagsFromResource operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RemoveTagsFromResourceMessage
type RemoveTagsFromResourceInput struct {
_ struct{} `type:"structure"`
// The Amazon Resource Name (ARN) of the resource from which you want the tags
// removed, for example arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster
// or arn:aws:elasticache:us-west-2:0123456789:snapshot:mySnapshot.
//
// For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
// Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
//
// ResourceName is a required field
ResourceName *string `type:"string" required:"true"`
// A list of TagKeys identifying the tags you want removed from the named resource.
//
// TagKeys is a required field
TagKeys []*string `type:"list" required:"true"`
}
// String returns the string representation
func (s RemoveTagsFromResourceInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RemoveTagsFromResourceInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RemoveTagsFromResourceInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"}
if s.ResourceName == nil {
invalidParams.Add(request.NewErrParamRequired("ResourceName"))
}
if s.TagKeys == nil {
invalidParams.Add(request.NewErrParamRequired("TagKeys"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetResourceName sets the ResourceName field's value.
func (s *RemoveTagsFromResourceInput) SetResourceName(v string) *RemoveTagsFromResourceInput {
s.ResourceName = &v
return s
}
// SetTagKeys sets the TagKeys field's value.
func (s *RemoveTagsFromResourceInput) SetTagKeys(v []*string) *RemoveTagsFromResourceInput {
s.TagKeys = v
return s
}
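// Illustrative sketch (not part of the generated API): removing a tag by key
// from a cluster ARN. The ARN mirrors the documentation example above and the
// tag key is hypothetical.
func exampleRemoveTagsFromResourceInput() error {
	key := "cost-center"
	input := (&RemoveTagsFromResourceInput{}).
		SetResourceName("arn:aws:elasticache:us-west-2:0123456789:cluster:myCluster").
		SetTagKeys([]*string{&key})
	return input.Validate()
}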
// Contains all of the attributes of a specific Redis replication group.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReplicationGroup
type ReplicationGroup struct {
_ struct{} `type:"structure"`
// Indicates the status of Multi-AZ for this replication group.
//
// ElastiCache Multi-AZ replication groups are not supported on:
//
// Redis versions earlier than 2.8.6.
//
// Redis (cluster mode disabled): T1 and T2 cache node types.
//
// Redis (cluster mode enabled): T1 node types.
AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"`
// The name of the compute and memory capacity node type for each node in the
// replication group.
CacheNodeType *string `type:"string"`
// A flag indicating whether or not this replication group is cluster enabled;
// i.e., whether its data can be partitioned across multiple shards (API/CLI:
// node groups).
//
// Valid values: true | false
ClusterEnabled *bool `type:"boolean"`
// The configuration endpoint for this replication group. Use the configuration
// endpoint to connect to this replication group.
ConfigurationEndpoint *Endpoint `type:"structure"`
// The description of the replication group.
Description *string `type:"string"`
// The names of all the cache clusters that are part of this replication group.
MemberClusters []*string `locationNameList:"ClusterId" type:"list"`
// A single element list with information about the nodes in the replication
// group.
NodeGroups []*NodeGroup `locationNameList:"NodeGroup" type:"list"`
// A group of settings to be applied to the replication group, either immediately
// or during the next maintenance window.
PendingModifiedValues *ReplicationGroupPendingModifiedValues `type:"structure"`
// The identifier for the replication group.
ReplicationGroupId *string `type:"string"`
// The number of days for which ElastiCache retains automatic cache cluster
// snapshots before deleting them. For example, if you set SnapshotRetentionLimit
// to 5, a snapshot that was taken today is retained for 5 days before being
// deleted.
//
// If the value of SnapshotRetentionLimit is set to zero (0), backups are turned
// off.
SnapshotRetentionLimit *int64 `type:"integer"`
// The daily time range (in UTC) during which ElastiCache begins taking a daily
// snapshot of your node group (shard).
//
// Example: 05:00-09:00
//
// If you do not specify this parameter, ElastiCache automatically chooses an
// appropriate time range.
//
// Note: This parameter is only valid if the Engine parameter is redis.
SnapshotWindow *string `type:"string"`
// The cache cluster ID that is used as the daily snapshot source for the replication
// group.
SnapshottingClusterId *string `type:"string"`
// The current state of this replication group - creating, available, modifying,
// deleting, create-failed, snapshotting.
Status *string `type:"string"`
}
// String returns the string representation
func (s ReplicationGroup) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ReplicationGroup) GoString() string {
return s.String()
}
// SetAutomaticFailover sets the AutomaticFailover field's value.
func (s *ReplicationGroup) SetAutomaticFailover(v string) *ReplicationGroup {
s.AutomaticFailover = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *ReplicationGroup) SetCacheNodeType(v string) *ReplicationGroup {
s.CacheNodeType = &v
return s
}
// SetClusterEnabled sets the ClusterEnabled field's value.
func (s *ReplicationGroup) SetClusterEnabled(v bool) *ReplicationGroup {
s.ClusterEnabled = &v
return s
}
// SetConfigurationEndpoint sets the ConfigurationEndpoint field's value.
func (s *ReplicationGroup) SetConfigurationEndpoint(v *Endpoint) *ReplicationGroup {
s.ConfigurationEndpoint = v
return s
}
// SetDescription sets the Description field's value.
func (s *ReplicationGroup) SetDescription(v string) *ReplicationGroup {
s.Description = &v
return s
}
// SetMemberClusters sets the MemberClusters field's value.
func (s *ReplicationGroup) SetMemberClusters(v []*string) *ReplicationGroup {
s.MemberClusters = v
return s
}
// SetNodeGroups sets the NodeGroups field's value.
func (s *ReplicationGroup) SetNodeGroups(v []*NodeGroup) *ReplicationGroup {
s.NodeGroups = v
return s
}
// SetPendingModifiedValues sets the PendingModifiedValues field's value.
func (s *ReplicationGroup) SetPendingModifiedValues(v *ReplicationGroupPendingModifiedValues) *ReplicationGroup {
s.PendingModifiedValues = v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *ReplicationGroup) SetReplicationGroupId(v string) *ReplicationGroup {
s.ReplicationGroupId = &v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *ReplicationGroup) SetSnapshotRetentionLimit(v int64) *ReplicationGroup {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *ReplicationGroup) SetSnapshotWindow(v string) *ReplicationGroup {
s.SnapshotWindow = &v
return s
}
// SetSnapshottingClusterId sets the SnapshottingClusterId field's value.
func (s *ReplicationGroup) SetSnapshottingClusterId(v string) *ReplicationGroup {
s.SnapshottingClusterId = &v
return s
}
// SetStatus sets the Status field's value.
func (s *ReplicationGroup) SetStatus(v string) *ReplicationGroup {
s.Status = &v
return s
}
// The settings to be applied to the Redis replication group, either immediately
// or during the next maintenance window.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReplicationGroupPendingModifiedValues
type ReplicationGroupPendingModifiedValues struct {
_ struct{} `type:"structure"`
// Indicates the status of Multi-AZ for this Redis replication group.
//
// ElastiCache Multi-AZ replication groups are not supported on:
//
// Redis versions earlier than 2.8.6.
//
// Redis (cluster mode disabled): T1 and T2 cache node types.
//
// Redis (cluster mode enabled): T1 node types.
AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"`
// The primary cluster ID that is applied immediately (if --apply-immediately
// was specified), or during the next maintenance window.
PrimaryClusterId *string `type:"string"`
}
// String returns the string representation
func (s ReplicationGroupPendingModifiedValues) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ReplicationGroupPendingModifiedValues) GoString() string {
return s.String()
}
// SetAutomaticFailoverStatus sets the AutomaticFailoverStatus field's value.
func (s *ReplicationGroupPendingModifiedValues) SetAutomaticFailoverStatus(v string) *ReplicationGroupPendingModifiedValues {
s.AutomaticFailoverStatus = &v
return s
}
// SetPrimaryClusterId sets the PrimaryClusterId field's value.
func (s *ReplicationGroupPendingModifiedValues) SetPrimaryClusterId(v string) *ReplicationGroupPendingModifiedValues {
s.PrimaryClusterId = &v
return s
}
// Represents the output of a PurchaseReservedCacheNodesOffering operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReservedCacheNode
type ReservedCacheNode struct {
_ struct{} `type:"structure"`
// The number of cache nodes that have been reserved.
CacheNodeCount *int64 `type:"integer"`
// The cache node type for the reserved cache nodes.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// The duration of the reservation in seconds.
Duration *int64 `type:"integer"`
// The fixed price charged for this reserved cache node.
FixedPrice *float64 `type:"double"`
// The offering type of this reserved cache node.
OfferingType *string `type:"string"`
// The description of the reserved cache node.
ProductDescription *string `type:"string"`
// The recurring price charged to run this reserved cache node.
RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
// The unique identifier for the reservation.
ReservedCacheNodeId *string `type:"string"`
// The offering identifier.
ReservedCacheNodesOfferingId *string `type:"string"`
// The time the reservation started.
StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The state of the reserved cache node.
State *string `type:"string"`
// The hourly price charged for this reserved cache node.
UsagePrice *float64 `type:"double"`
}
// String returns the string representation
func (s ReservedCacheNode) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ReservedCacheNode) GoString() string {
return s.String()
}
// SetCacheNodeCount sets the CacheNodeCount field's value.
func (s *ReservedCacheNode) SetCacheNodeCount(v int64) *ReservedCacheNode {
s.CacheNodeCount = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *ReservedCacheNode) SetCacheNodeType(v string) *ReservedCacheNode {
s.CacheNodeType = &v
return s
}
// SetDuration sets the Duration field's value.
func (s *ReservedCacheNode) SetDuration(v int64) *ReservedCacheNode {
s.Duration = &v
return s
}
// SetFixedPrice sets the FixedPrice field's value.
func (s *ReservedCacheNode) SetFixedPrice(v float64) *ReservedCacheNode {
s.FixedPrice = &v
return s
}
// SetOfferingType sets the OfferingType field's value.
func (s *ReservedCacheNode) SetOfferingType(v string) *ReservedCacheNode {
s.OfferingType = &v
return s
}
// SetProductDescription sets the ProductDescription field's value.
func (s *ReservedCacheNode) SetProductDescription(v string) *ReservedCacheNode {
s.ProductDescription = &v
return s
}
// SetRecurringCharges sets the RecurringCharges field's value.
func (s *ReservedCacheNode) SetRecurringCharges(v []*RecurringCharge) *ReservedCacheNode {
s.RecurringCharges = v
return s
}
// SetReservedCacheNodeId sets the ReservedCacheNodeId field's value.
func (s *ReservedCacheNode) SetReservedCacheNodeId(v string) *ReservedCacheNode {
s.ReservedCacheNodeId = &v
return s
}
// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value.
func (s *ReservedCacheNode) SetReservedCacheNodesOfferingId(v string) *ReservedCacheNode {
s.ReservedCacheNodesOfferingId = &v
return s
}
// SetStartTime sets the StartTime field's value.
func (s *ReservedCacheNode) SetStartTime(v time.Time) *ReservedCacheNode {
s.StartTime = &v
return s
}
// SetState sets the State field's value.
func (s *ReservedCacheNode) SetState(v string) *ReservedCacheNode {
s.State = &v
return s
}
// SetUsagePrice sets the UsagePrice field's value.
func (s *ReservedCacheNode) SetUsagePrice(v float64) *ReservedCacheNode {
s.UsagePrice = &v
return s
}
// Describes all of the attributes of a reserved cache node offering.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ReservedCacheNodesOffering
type ReservedCacheNodesOffering struct {
_ struct{} `type:"structure"`
// The cache node type for the reserved cache node.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// The duration of the offering, in seconds.
Duration *int64 `type:"integer"`
// The fixed price charged for this offering.
FixedPrice *float64 `type:"double"`
// The offering type.
OfferingType *string `type:"string"`
// The cache engine used by the offering.
ProductDescription *string `type:"string"`
// The recurring price charged to run this reserved cache node.
RecurringCharges []*RecurringCharge `locationNameList:"RecurringCharge" type:"list"`
// A unique identifier for the reserved cache node offering.
ReservedCacheNodesOfferingId *string `type:"string"`
// The hourly price charged for this offering.
UsagePrice *float64 `type:"double"`
}
// String returns the string representation
func (s ReservedCacheNodesOffering) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ReservedCacheNodesOffering) GoString() string {
return s.String()
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *ReservedCacheNodesOffering) SetCacheNodeType(v string) *ReservedCacheNodesOffering {
s.CacheNodeType = &v
return s
}
// SetDuration sets the Duration field's value.
func (s *ReservedCacheNodesOffering) SetDuration(v int64) *ReservedCacheNodesOffering {
s.Duration = &v
return s
}
// SetFixedPrice sets the FixedPrice field's value.
func (s *ReservedCacheNodesOffering) SetFixedPrice(v float64) *ReservedCacheNodesOffering {
s.FixedPrice = &v
return s
}
// SetOfferingType sets the OfferingType field's value.
func (s *ReservedCacheNodesOffering) SetOfferingType(v string) *ReservedCacheNodesOffering {
s.OfferingType = &v
return s
}
// SetProductDescription sets the ProductDescription field's value.
func (s *ReservedCacheNodesOffering) SetProductDescription(v string) *ReservedCacheNodesOffering {
s.ProductDescription = &v
return s
}
// SetRecurringCharges sets the RecurringCharges field's value.
func (s *ReservedCacheNodesOffering) SetRecurringCharges(v []*RecurringCharge) *ReservedCacheNodesOffering {
s.RecurringCharges = v
return s
}
// SetReservedCacheNodesOfferingId sets the ReservedCacheNodesOfferingId field's value.
func (s *ReservedCacheNodesOffering) SetReservedCacheNodesOfferingId(v string) *ReservedCacheNodesOffering {
s.ReservedCacheNodesOfferingId = &v
return s
}
// SetUsagePrice sets the UsagePrice field's value.
func (s *ReservedCacheNodesOffering) SetUsagePrice(v float64) *ReservedCacheNodesOffering {
s.UsagePrice = &v
return s
}
// Represents the input of a ResetCacheParameterGroup operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/ResetCacheParameterGroupMessage
type ResetCacheParameterGroupInput struct {
_ struct{} `type:"structure"`
// The name of the cache parameter group to reset.
//
// CacheParameterGroupName is a required field
CacheParameterGroupName *string `type:"string" required:"true"`
// An array of parameter names to reset to their default values. If ResetAllParameters
// is true, do not use ParameterNameValues. If ResetAllParameters is false,
// you must specify the name of at least one parameter to reset.
ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list"`
// If true, all parameters in the cache parameter group are reset to their default
// values. If false, only the parameters listed by ParameterNameValues are reset
// to their default values.
//
// Valid values: true | false
ResetAllParameters *bool `type:"boolean"`
}
// String returns the string representation
func (s ResetCacheParameterGroupInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s ResetCacheParameterGroupInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *ResetCacheParameterGroupInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "ResetCacheParameterGroupInput"}
if s.CacheParameterGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *ResetCacheParameterGroupInput) SetCacheParameterGroupName(v string) *ResetCacheParameterGroupInput {
s.CacheParameterGroupName = &v
return s
}
// SetParameterNameValues sets the ParameterNameValues field's value.
func (s *ResetCacheParameterGroupInput) SetParameterNameValues(v []*ParameterNameValue) *ResetCacheParameterGroupInput {
s.ParameterNameValues = v
return s
}
// SetResetAllParameters sets the ResetAllParameters field's value.
func (s *ResetCacheParameterGroupInput) SetResetAllParameters(v bool) *ResetCacheParameterGroupInput {
s.ResetAllParameters = &v
return s
}
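// Illustrative sketch (not part of the generated API): resetting a single
// named parameter rather than the whole group, per the ResetAllParameters
// semantics documented above. Group and parameter names are hypothetical.
func exampleResetCacheParameterGroupInput() error {
	input := (&ResetCacheParameterGroupInput{}).
		SetCacheParameterGroupName("my-param-group").
		SetResetAllParameters(false).
		SetParameterNameValues([]*ParameterNameValue{
			(&ParameterNameValue{}).SetParameterName("maxmemory-policy"),
		})
	return input.Validate()
}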
// Represents the input of a RevokeCacheSecurityGroupIngress operation.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngressMessage
type RevokeCacheSecurityGroupIngressInput struct {
_ struct{} `type:"structure"`
// The name of the cache security group to revoke ingress from.
//
// CacheSecurityGroupName is a required field
CacheSecurityGroupName *string `type:"string" required:"true"`
// The name of the Amazon EC2 security group to revoke access from.
//
// EC2SecurityGroupName is a required field
EC2SecurityGroupName *string `type:"string" required:"true"`
// The AWS account number of the Amazon EC2 security group owner. Note that
// this is not the same thing as an AWS access key ID - you must provide a valid
// AWS account number for this parameter.
//
// EC2SecurityGroupOwnerId is a required field
EC2SecurityGroupOwnerId *string `type:"string" required:"true"`
}
// String returns the string representation
func (s RevokeCacheSecurityGroupIngressInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RevokeCacheSecurityGroupIngressInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *RevokeCacheSecurityGroupIngressInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "RevokeCacheSecurityGroupIngressInput"}
if s.CacheSecurityGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("CacheSecurityGroupName"))
}
if s.EC2SecurityGroupName == nil {
invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupName"))
}
if s.EC2SecurityGroupOwnerId == nil {
invalidParams.Add(request.NewErrParamRequired("EC2SecurityGroupOwnerId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetCacheSecurityGroupName sets the CacheSecurityGroupName field's value.
func (s *RevokeCacheSecurityGroupIngressInput) SetCacheSecurityGroupName(v string) *RevokeCacheSecurityGroupIngressInput {
s.CacheSecurityGroupName = &v
return s
}
// SetEC2SecurityGroupName sets the EC2SecurityGroupName field's value.
func (s *RevokeCacheSecurityGroupIngressInput) SetEC2SecurityGroupName(v string) *RevokeCacheSecurityGroupIngressInput {
s.EC2SecurityGroupName = &v
return s
}
// SetEC2SecurityGroupOwnerId sets the EC2SecurityGroupOwnerId field's value.
func (s *RevokeCacheSecurityGroupIngressInput) SetEC2SecurityGroupOwnerId(v string) *RevokeCacheSecurityGroupIngressInput {
s.EC2SecurityGroupOwnerId = &v
return s
}
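// Illustrative sketch (not part of the generated API): all three fields are
// required, including the EC2 owner's AWS account number (not an access key
// ID, as noted above). All values below are hypothetical.
func exampleRevokeCacheSecurityGroupIngressInput() error {
	input := (&RevokeCacheSecurityGroupIngressInput{}).
		SetCacheSecurityGroupName("my-cache-sg").
		SetEC2SecurityGroupName("my-ec2-sg").
		SetEC2SecurityGroupOwnerId("123456789012")
	return input.Validate()
}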
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/RevokeCacheSecurityGroupIngressResult
type RevokeCacheSecurityGroupIngressOutput struct {
_ struct{} `type:"structure"`
// Represents the output of one of the following operations:
//
// * AuthorizeCacheSecurityGroupIngress
//
// * CreateCacheSecurityGroup
//
// * RevokeCacheSecurityGroupIngress
CacheSecurityGroup *CacheSecurityGroup `type:"structure"`
}
// String returns the string representation
func (s RevokeCacheSecurityGroupIngressOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s RevokeCacheSecurityGroupIngressOutput) GoString() string {
return s.String()
}
// SetCacheSecurityGroup sets the CacheSecurityGroup field's value.
func (s *RevokeCacheSecurityGroupIngressOutput) SetCacheSecurityGroup(v *CacheSecurityGroup) *RevokeCacheSecurityGroupIngressOutput {
s.CacheSecurityGroup = v
return s
}
// Represents a single cache security group and its status.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/SecurityGroupMembership
type SecurityGroupMembership struct {
_ struct{} `type:"structure"`
// The identifier of the cache security group.
SecurityGroupId *string `type:"string"`
// The status of the cache security group membership. The status changes whenever
// a cache security group is modified, or when the cache security groups assigned
// to a cache cluster are modified.
Status *string `type:"string"`
}
// String returns the string representation
func (s SecurityGroupMembership) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s SecurityGroupMembership) GoString() string {
return s.String()
}
// SetSecurityGroupId sets the SecurityGroupId field's value.
func (s *SecurityGroupMembership) SetSecurityGroupId(v string) *SecurityGroupMembership {
s.SecurityGroupId = &v
return s
}
// SetStatus sets the Status field's value.
func (s *SecurityGroupMembership) SetStatus(v string) *SecurityGroupMembership {
s.Status = &v
return s
}
// Represents a copy of an entire Redis cache cluster as of the time when the
// snapshot was taken.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/Snapshot
type Snapshot struct {
_ struct{} `type:"structure"`
// This parameter is currently disabled.
AutoMinorVersionUpgrade *bool `type:"boolean"`
// Indicates the status of Multi-AZ for the source replication group.
//
// ElastiCache Multi-AZ replication groups are not supported on:
//
// Redis versions earlier than 2.8.6.
//
// Redis (cluster mode disabled): T1 and T2 cache node types.
//
// Redis (cluster mode enabled): T1 node types.
AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"`
// The date and time when the source cache cluster was created.
CacheClusterCreateTime *time.Time `type:"timestamp" timestampFormat:"iso8601"`
// The user-supplied identifier of the source cache cluster.
CacheClusterId *string `type:"string"`
// The name of the compute and memory capacity node type for the source cache
// cluster.
//
// Valid node types are as follows:
//
// * General purpose:
//
// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium,
// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge, cache.m4.large, cache.m4.xlarge,
// cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge
//
// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large,
// cache.m1.xlarge
//
// * Compute optimized: cache.c1.xlarge
//
// * Memory optimized:
//
// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge,
// cache.r3.8xlarge
//
// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge
//
// Notes:
//
// * All T2 instances are created in an Amazon Virtual Private Cloud (Amazon
// VPC).
//
// * Redis backup/restore is not supported for Redis (cluster mode disabled)
// T1 and T2 instances. Backup/restore is supported on Redis (cluster mode
// enabled) T2 instances.
//
// * Redis Append-only files (AOF) functionality is not supported for T1
// or T2 instances.
//
// For a complete listing of node types and specifications, see Amazon ElastiCache
// Product Features and Details (http://aws.amazon.com/elasticache/details)
// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#ParameterGroups.Memcached.NodeSpecific)
// or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#ParameterGroups.Redis.NodeSpecific).
CacheNodeType *string `type:"string"`
// The cache parameter group that is associated with the source cache cluster.
CacheParameterGroupName *string `type:"string"`
// The name of the cache subnet group associated with the source cache cluster.
CacheSubnetGroupName *string `type:"string"`
// The name of the cache engine (memcached or redis) used by the source cache
// cluster.
Engine *string `type:"string"`
// The version of the cache engine that is used by the source cache
// cluster.
EngineVersion *string `type:"string"`
// A list of the cache nodes in the source cache cluster.
NodeSnapshots []*NodeSnapshot `locationNameList:"NodeSnapshot" type:"list"`
// The number of cache nodes in the source cache cluster.
//
// For clusters running Redis, this value must be 1. For clusters running Memcached,
// this value must be between 1 and 20.
NumCacheNodes *int64 `type:"integer"`
// The number of node groups (shards) in this snapshot. When restoring from
// a snapshot, the number of node groups (shards) in the snapshot and in the
// restored replication group must be the same.
NumNodeGroups *int64 `type:"integer"`
// The port number used by each cache node in the source cache cluster.
Port *int64 `type:"integer"`
// The name of the Availability Zone in which the source cache cluster is located.
PreferredAvailabilityZone *string `type:"string"`
// Specifies the weekly time range during which maintenance on the cluster is
// performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi
// (24H Clock UTC). The minimum maintenance window is a 60 minute period.
//
// Valid values for ddd are:
//
// * sun
//
// * mon
//
// * tue
//
// * wed
//
// * thu
//
// * fri
//
// * sat
//
// Example: sun:23:00-mon:01:30
PreferredMaintenanceWindow *string `type:"string"`
// A description of the source replication group.
ReplicationGroupDescription *string `type:"string"`
// The unique identifier of the source replication group.
ReplicationGroupId *string `type:"string"`
// The name of a snapshot. For an automatic snapshot, the name is system-generated.
// For a manual snapshot, this is the user-provided name.
SnapshotName *string `type:"string"`
// For an automatic snapshot, the number of days for which ElastiCache retains
// the snapshot before deleting it.
//
// For manual snapshots, this field reflects the SnapshotRetentionLimit for
// the source cache cluster when the snapshot was created. This field is otherwise
// ignored: Manual snapshots do not expire, and can only be deleted using the
// DeleteSnapshot operation.
//
// Important: If the value of SnapshotRetentionLimit is set to zero (0), backups
// are turned off.
SnapshotRetentionLimit *int64 `type:"integer"`
// Indicates whether the snapshot is from an automatic backup (automated) or
// was created manually (manual).
SnapshotSource *string `type:"string"`
// The status of the snapshot. Valid values: creating | available | restoring
// | copying | deleting.
SnapshotStatus *string `type:"string"`
// The daily time range during which ElastiCache takes daily snapshots of the
// source cache cluster.
SnapshotWindow *string `type:"string"`
// The Amazon Resource Name (ARN) for the topic used by the source cache cluster
// for publishing notifications.
TopicArn *string `type:"string"`
// The Amazon Virtual Private Cloud identifier (VPC ID) of the cache subnet
// group for the source cache cluster.
VpcId *string `type:"string"`
}
// String returns the string representation
func (s Snapshot) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Snapshot) GoString() string {
return s.String()
}
// SetAutoMinorVersionUpgrade sets the AutoMinorVersionUpgrade field's value.
func (s *Snapshot) SetAutoMinorVersionUpgrade(v bool) *Snapshot {
s.AutoMinorVersionUpgrade = &v
return s
}
// SetAutomaticFailover sets the AutomaticFailover field's value.
func (s *Snapshot) SetAutomaticFailover(v string) *Snapshot {
s.AutomaticFailover = &v
return s
}
// SetCacheClusterCreateTime sets the CacheClusterCreateTime field's value.
func (s *Snapshot) SetCacheClusterCreateTime(v time.Time) *Snapshot {
s.CacheClusterCreateTime = &v
return s
}
// SetCacheClusterId sets the CacheClusterId field's value.
func (s *Snapshot) SetCacheClusterId(v string) *Snapshot {
s.CacheClusterId = &v
return s
}
// SetCacheNodeType sets the CacheNodeType field's value.
func (s *Snapshot) SetCacheNodeType(v string) *Snapshot {
s.CacheNodeType = &v
return s
}
// SetCacheParameterGroupName sets the CacheParameterGroupName field's value.
func (s *Snapshot) SetCacheParameterGroupName(v string) *Snapshot {
s.CacheParameterGroupName = &v
return s
}
// SetCacheSubnetGroupName sets the CacheSubnetGroupName field's value.
func (s *Snapshot) SetCacheSubnetGroupName(v string) *Snapshot {
s.CacheSubnetGroupName = &v
return s
}
// SetEngine sets the Engine field's value.
func (s *Snapshot) SetEngine(v string) *Snapshot {
s.Engine = &v
return s
}
// SetEngineVersion sets the EngineVersion field's value.
func (s *Snapshot) SetEngineVersion(v string) *Snapshot {
s.EngineVersion = &v
return s
}
// SetNodeSnapshots sets the NodeSnapshots field's value.
func (s *Snapshot) SetNodeSnapshots(v []*NodeSnapshot) *Snapshot {
s.NodeSnapshots = v
return s
}
// SetNumCacheNodes sets the NumCacheNodes field's value.
func (s *Snapshot) SetNumCacheNodes(v int64) *Snapshot {
s.NumCacheNodes = &v
return s
}
// SetNumNodeGroups sets the NumNodeGroups field's value.
func (s *Snapshot) SetNumNodeGroups(v int64) *Snapshot {
s.NumNodeGroups = &v
return s
}
// SetPort sets the Port field's value.
func (s *Snapshot) SetPort(v int64) *Snapshot {
s.Port = &v
return s
}
// SetPreferredAvailabilityZone sets the PreferredAvailabilityZone field's value.
func (s *Snapshot) SetPreferredAvailabilityZone(v string) *Snapshot {
s.PreferredAvailabilityZone = &v
return s
}
// SetPreferredMaintenanceWindow sets the PreferredMaintenanceWindow field's value.
func (s *Snapshot) SetPreferredMaintenanceWindow(v string) *Snapshot {
s.PreferredMaintenanceWindow = &v
return s
}
// SetReplicationGroupDescription sets the ReplicationGroupDescription field's value.
func (s *Snapshot) SetReplicationGroupDescription(v string) *Snapshot {
s.ReplicationGroupDescription = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *Snapshot) SetReplicationGroupId(v string) *Snapshot {
s.ReplicationGroupId = &v
return s
}
// SetSnapshotName sets the SnapshotName field's value.
func (s *Snapshot) SetSnapshotName(v string) *Snapshot {
s.SnapshotName = &v
return s
}
// SetSnapshotRetentionLimit sets the SnapshotRetentionLimit field's value.
func (s *Snapshot) SetSnapshotRetentionLimit(v int64) *Snapshot {
s.SnapshotRetentionLimit = &v
return s
}
// SetSnapshotSource sets the SnapshotSource field's value.
func (s *Snapshot) SetSnapshotSource(v string) *Snapshot {
s.SnapshotSource = &v
return s
}
// SetSnapshotStatus sets the SnapshotStatus field's value.
func (s *Snapshot) SetSnapshotStatus(v string) *Snapshot {
s.SnapshotStatus = &v
return s
}
// SetSnapshotWindow sets the SnapshotWindow field's value.
func (s *Snapshot) SetSnapshotWindow(v string) *Snapshot {
s.SnapshotWindow = &v
return s
}
// SetTopicArn sets the TopicArn field's value.
func (s *Snapshot) SetTopicArn(v string) *Snapshot {
s.TopicArn = &v
return s
}
// SetVpcId sets the VpcId field's value.
func (s *Snapshot) SetVpcId(v string) *Snapshot {
s.VpcId = &v
return s
}
// Represents the subnet associated with a cache cluster. This parameter refers
// to subnets defined in Amazon Virtual Private Cloud (Amazon VPC) and used
// with ElastiCache.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/Subnet
type Subnet struct {
_ struct{} `type:"structure"`
// The Availability Zone associated with the subnet.
SubnetAvailabilityZone *AvailabilityZone `type:"structure"`
// The unique identifier for the subnet.
SubnetIdentifier *string `type:"string"`
}
// String returns the string representation
func (s Subnet) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Subnet) GoString() string {
return s.String()
}
// SetSubnetAvailabilityZone sets the SubnetAvailabilityZone field's value.
func (s *Subnet) SetSubnetAvailabilityZone(v *AvailabilityZone) *Subnet {
s.SubnetAvailabilityZone = v
return s
}
// SetSubnetIdentifier sets the SubnetIdentifier field's value.
func (s *Subnet) SetSubnetIdentifier(v string) *Subnet {
s.SubnetIdentifier = &v
return s
}
// A cost allocation Tag that can be added to an ElastiCache cluster or replication
// group. Tags are composed of a Key/Value pair. A tag with a null Value is
// permitted.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/Tag
type Tag struct {
_ struct{} `type:"structure"`
// The key for the tag. May not be null.
Key *string `type:"string"`
// The tag's value. May be null.
Value *string `type:"string"`
}
// String returns the string representation
func (s Tag) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s Tag) GoString() string {
return s.String()
}
// SetKey sets the Key field's value.
func (s *Tag) SetKey(v string) *Tag {
s.Key = &v
return s
}
// SetValue sets the Value field's value.
func (s *Tag) SetValue(v string) *Tag {
s.Value = &v
return s
}
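// Illustrative sketch (not part of the generated API): a cost allocation tag
// with a key and a value; per the comment above, the value may be left nil
// but the key may not. The names below are hypothetical.
func exampleTag() *Tag {
	return (&Tag{}).SetKey("cost-center").SetValue("dev")
}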
// Represents the output from the AddTagsToResource, ListTagsForResource, and
// RemoveTagsFromResource operations.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TagListMessage
type TagListMessage struct {
_ struct{} `type:"structure"`
// A list of cost allocation tags as key-value pairs.
TagList []*Tag `locationNameList:"Tag" type:"list"`
}
// String returns the string representation
func (s TagListMessage) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TagListMessage) GoString() string {
return s.String()
}
// SetTagList sets the TagList field's value.
func (s *TagListMessage) SetTagList(v []*Tag) *TagListMessage {
s.TagList = v
return s
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailoverMessage
type TestFailoverInput struct {
_ struct{} `type:"structure"`
// The name of the node group (called shard in the console) in this replication
// group on which automatic failover is to be tested. You may test automatic
// failover on up to 5 node groups in any rolling 24-hour period.
//
// NodeGroupId is a required field
NodeGroupId *string `type:"string" required:"true"`
// The name of the replication group (console: cluster) whose automatic failover
// is being tested by this operation.
//
// ReplicationGroupId is a required field
ReplicationGroupId *string `type:"string" required:"true"`
}
// String returns the string representation
func (s TestFailoverInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestFailoverInput) GoString() string {
return s.String()
}
// Validate inspects the fields of the type to determine if they are valid.
func (s *TestFailoverInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "TestFailoverInput"}
if s.NodeGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("NodeGroupId"))
}
if s.ReplicationGroupId == nil {
invalidParams.Add(request.NewErrParamRequired("ReplicationGroupId"))
}
if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}
// SetNodeGroupId sets the NodeGroupId field's value.
func (s *TestFailoverInput) SetNodeGroupId(v string) *TestFailoverInput {
s.NodeGroupId = &v
return s
}
// SetReplicationGroupId sets the ReplicationGroupId field's value.
func (s *TestFailoverInput) SetReplicationGroupId(v string) *TestFailoverInput {
s.ReplicationGroupId = &v
return s
}
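// Illustrative sketch (not part of the generated API): testing failover on
// one node group (shard) of a replication group; both identifiers are
// required, and the values below are hypothetical.
func exampleTestFailoverInput() error {
	input := (&TestFailoverInput{}).
		SetReplicationGroupId("my-replication-group").
		SetNodeGroupId("0001")
	return input.Validate()
}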
// Please also see https://docs.aws.amazon.com/goto/WebAPI/elasticache-2015-02-02/TestFailoverResult
type TestFailoverOutput struct {
_ struct{} `type:"structure"`
// Contains all of the attributes of a specific Redis replication group.
ReplicationGroup *ReplicationGroup `type:"structure"`
}
// String returns the string representation
func (s TestFailoverOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
func (s TestFailoverOutput) GoString() string {
return s.String()
}
// SetReplicationGroup sets the ReplicationGroup field's value.
func (s *TestFailoverOutput) SetReplicationGroup(v *ReplicationGroup) *TestFailoverOutput {
s.ReplicationGroup = v
return s
}
const (
// AZModeSingleAz is a AZMode enum value
AZModeSingleAz = "single-az"
// AZModeCrossAz is a AZMode enum value
AZModeCrossAz = "cross-az"
)
const (
// AutomaticFailoverStatusEnabled is a AutomaticFailoverStatus enum value
AutomaticFailoverStatusEnabled = "enabled"
// AutomaticFailoverStatusDisabled is a AutomaticFailoverStatus enum value
AutomaticFailoverStatusDisabled = "disabled"
// AutomaticFailoverStatusEnabling is a AutomaticFailoverStatus enum value
AutomaticFailoverStatusEnabling = "enabling"
// AutomaticFailoverStatusDisabling is a AutomaticFailoverStatus enum value
AutomaticFailoverStatusDisabling = "disabling"
)
const (
// ChangeTypeImmediate is a ChangeType enum value
ChangeTypeImmediate = "immediate"
// ChangeTypeRequiresReboot is a ChangeType enum value
ChangeTypeRequiresReboot = "requires-reboot"
)
const (
// PendingAutomaticFailoverStatusEnabled is a PendingAutomaticFailoverStatus enum value
PendingAutomaticFailoverStatusEnabled = "enabled"
// PendingAutomaticFailoverStatusDisabled is a PendingAutomaticFailoverStatus enum value
PendingAutomaticFailoverStatusDisabled = "disabled"
)
const (
// SourceTypeCacheCluster is a SourceType enum value
SourceTypeCacheCluster = "cache-cluster"
// SourceTypeCacheParameterGroup is a SourceType enum value
SourceTypeCacheParameterGroup = "cache-parameter-group"
// SourceTypeCacheSecurityGroup is a SourceType enum value
SourceTypeCacheSecurityGroup = "cache-security-group"
// SourceTypeCacheSubnetGroup is a SourceType enum value
SourceTypeCacheSubnetGroup = "cache-subnet-group"
// SourceTypeReplicationGroup is a SourceType enum value
SourceTypeReplicationGroup = "replication-group"
) | // })
//
func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(*DescribeEventsOutput, bool) bool) error { |
wantmanager.go | package wantmanager
import (
"context"
"math"
bsmsg "github.com/ipfs/go-bitswap/message"
wantlist "github.com/ipfs/go-bitswap/wantlist"
logging "github.com/ipfs/go-log"
cid "github.com/ipfs/go-cid"
metrics "github.com/ipfs/go-metrics-interface"
peer "github.com/libp2p/go-libp2p-core/peer"
)
var log = logging.Logger("bitswap")
const (
// maxPriority is the max priority as defined by the bitswap protocol
maxPriority = math.MaxInt32
)
// PeerHandler sends changes out to the network as they get added to the wantlist
// managed by the WantManager.
type PeerHandler interface {
Disconnected(p peer.ID)
Connected(p peer.ID, initialWants *wantlist.SessionTrackedWantlist)
SendMessage(entries []bsmsg.Entry, targets []peer.ID, from uint64)
}
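// A minimal no-op PeerHandler is sketched below for clarity; it is
// illustrative only and not part of this package:
//
//	type noopPeerHandler struct{}
//
//	func (noopPeerHandler) Disconnected(p peer.ID)                                  {}
//	func (noopPeerHandler) Connected(p peer.ID, w *wantlist.SessionTrackedWantlist) {}
//	func (noopPeerHandler) SendMessage(e []bsmsg.Entry, t []peer.ID, from uint64)   {}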
type wantMessage interface {
handle(wm *WantManager)
}
// WantManager manages a global want list. It tracks two separate want lists -
// one for all wants, and one for wants that are specifically broadcast to the
// internet.
type WantManager struct {
// channel requests to the run loop
// to get predictable behavior while running this in a goroutine
// having only one channel is necessary so that requests are processed serially
wantMessages chan wantMessage
// synchronized by Run loop, only touch inside there
wl *wantlist.SessionTrackedWantlist
bcwl *wantlist.SessionTrackedWantlist
ctx context.Context
cancel func()
peerHandler PeerHandler
wantlistGauge metrics.Gauge
}
// New initializes a new WantManager for a given context.
func New(ctx context.Context, peerHandler PeerHandler) *WantManager {
ctx, cancel := context.WithCancel(ctx)
wantlistGauge := metrics.NewCtx(ctx, "wantlist_total",
"Number of items in wantlist.").Gauge()
return &WantManager{
wantMessages: make(chan wantMessage, 10),
wl: wantlist.NewSessionTrackedWantlist(),
bcwl: wantlist.NewSessionTrackedWantlist(),
ctx: ctx,
cancel: cancel,
peerHandler: peerHandler,
wantlistGauge: wantlistGauge,
}
}
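// Typical lifecycle (an illustrative sketch, assuming a PeerHandler value
// named handler; not taken from this package's docs):
//
//	wm := New(ctx, handler)
//	wm.Startup()                        // start the serialized message loop
//	wm.WantBlocks(ctx, cids, nil, ses)  // empty peer list means broadcast
//	defer wm.Shutdown()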
// WantBlocks adds the given cids to the wantlist, tracked by the given session.
func (wm *WantManager) WantBlocks(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) {
log.Debugf("[wantlist] want blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses)
wm.addEntries(ctx, ks, peers, false, ses)
}
// CancelWants removes the given cids from the wantlist, tracked by the given session.
func (wm *WantManager) CancelWants(ctx context.Context, ks []cid.Cid, peers []peer.ID, ses uint64) {
log.Debugf("[wantlist] unwant blocks; cids=%s, peers=%s, ses=%d", ks, peers, ses)
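	// Note: cancels are sent with context.Background(), so they are still
	// delivered even when the caller's context is already done; only wm.ctx
	// can abort them.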
wm.addEntries(context.Background(), ks, peers, true, ses)
}
// CurrentWants returns the list of current wants.
func (wm *WantManager) CurrentWants() []wantlist.Entry {
resp := make(chan []wantlist.Entry, 1)
select {
	case wm.wantMessages <- &currentWantsMessage{resp}:
case <-wm.ctx.Done():
return nil
}
select {
case wantlist := <-resp:
return wantlist
case <-wm.ctx.Done():
return nil
}
}
// CurrentBroadcastWants returns the current list of wants that are broadcasts.
func (wm *WantManager) CurrentBroadcastWants() []wantlist.Entry {
resp := make(chan []wantlist.Entry, 1)
select {
	case wm.wantMessages <- &currentBroadcastWantsMessage{resp}:
case <-wm.ctx.Done():
return nil
}
select {
case wl := <-resp:
return wl
case <-wm.ctx.Done():
return nil |
// WantCount returns the total count of wants.
func (wm *WantManager) WantCount() int {
resp := make(chan int, 1)
select {
case wm.wantMessages <- &wantCountMessage{resp}:
case <-wm.ctx.Done():
return 0
}
select {
case count := <-resp:
return count
case <-wm.ctx.Done():
return 0
}
}
// Connected is called when a new peer is connected
func (wm *WantManager) Connected(p peer.ID) {
select {
case wm.wantMessages <- &connectedMessage{p}:
case <-wm.ctx.Done():
}
}
// Disconnected is called when a peer is disconnected
func (wm *WantManager) Disconnected(p peer.ID) {
select {
case wm.wantMessages <- &disconnectedMessage{p}:
case <-wm.ctx.Done():
}
}
// Startup starts processing for the WantManager.
func (wm *WantManager) Startup() {
go wm.run()
}
// Shutdown ends processing for the want manager.
func (wm *WantManager) Shutdown() {
wm.cancel()
}
func (wm *WantManager) run() {
// NOTE: Do not open any streams or connections from anywhere in this
// event loop. Really, just don't do anything likely to block.
for {
select {
case message := <-wm.wantMessages:
message.handle(wm)
case <-wm.ctx.Done():
return
}
}
}
func (wm *WantManager) addEntries(ctx context.Context, ks []cid.Cid, targets []peer.ID, cancel bool, ses uint64) {
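	// Entries keep their input order: the first CID gets maxPriority and each
	// subsequent CID a slightly lower priority.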
entries := make([]bsmsg.Entry, 0, len(ks))
for i, k := range ks {
entries = append(entries, bsmsg.Entry{
Cancel: cancel,
Entry: wantlist.NewRefEntry(k, maxPriority-i),
})
}
select {
case wm.wantMessages <- &wantSet{entries: entries, targets: targets, from: ses}:
case <-wm.ctx.Done():
case <-ctx.Done():
}
}
type wantSet struct {
entries []bsmsg.Entry
targets []peer.ID
from uint64
}
func (ws *wantSet) handle(wm *WantManager) {
// is this a broadcast or not?
brdc := len(ws.targets) == 0
// add changes to our wantlist
for _, e := range ws.entries {
if e.Cancel {
if brdc {
wm.bcwl.Remove(e.Cid, ws.from)
}
if wm.wl.Remove(e.Cid, ws.from) {
wm.wantlistGauge.Dec()
}
} else {
if brdc {
wm.bcwl.AddEntry(e.Entry, ws.from)
}
if wm.wl.AddEntry(e.Entry, ws.from) {
wm.wantlistGauge.Inc()
}
}
}
// broadcast those wantlist changes
wm.peerHandler.SendMessage(ws.entries, ws.targets, ws.from)
}
type currentWantsMessage struct {
resp chan<- []wantlist.Entry
}
func (cwm *currentWantsMessage) handle(wm *WantManager) {
cwm.resp <- wm.wl.Entries()
}
type currentBroadcastWantsMessage struct {
resp chan<- []wantlist.Entry
}
func (cbcwm *currentBroadcastWantsMessage) handle(wm *WantManager) {
cbcwm.resp <- wm.bcwl.Entries()
}
type wantCountMessage struct {
resp chan<- int
}
func (wcm *wantCountMessage) handle(wm *WantManager) {
wcm.resp <- wm.wl.Len()
}
type connectedMessage struct {
p peer.ID
}
func (cm *connectedMessage) handle(wm *WantManager) {
wm.peerHandler.Connected(cm.p, wm.bcwl)
}
type disconnectedMessage struct {
p peer.ID
}
func (dm *disconnectedMessage) handle(wm *WantManager) {
wm.peerHandler.Disconnected(dm.p)
} | }
} |
oci_data_safe_target_database_actions.py | #!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_safe_target_database_actions
short_description: Perform actions on a TargetDatabase resource in Oracle Cloud Infrastructure
description:
- Perform actions on a TargetDatabase resource in Oracle Cloud Infrastructure
- For I(action=activate), reactivates a previously deactivated Data Safe target database.
- For I(action=change_compartment), moves the Data Safe target database to the specified compartment.
- For I(action=deactivate), deactivates a target database in Data Safe.
- For I(action=download_privilege_script), downloads the privilege script to grant/revoke required roles from the Data Safe account on the target database.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
credentials:
description:
- ""
- Required for I(action=activate).
type: dict
suboptions:
user_name:
description:
- The database user name.
type: str
required: true
password:
description:
- The password of the database user.
type: str
required: true
target_database_id:
description:
- The OCID of the Data Safe target database.
- Required for I(action=activate), I(action=change_compartment), I(action=deactivate).
type: str
compartment_id:
description:
- The OCID of the new compartment to where you want to move the Data Safe target database.
- Required for I(action=change_compartment).
type: str
dest:
description:
- The destination file path to write the output. The file will be created if it does not exist. If the file already exists, the content will be
overwritten.
- Required for I(action=download_privilege_script).
type: str
action:
description:
- The action to perform on the TargetDatabase.
type: str
required: true
choices:
- "activate"
- "change_compartment"
- "deactivate"
- "download_privilege_script"
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Perform action activate on target_database
oci_data_safe_target_database_actions:
# required
credentials:
# required
user_name: user_name_example
password: example-password
target_database_id: "ocid1.targetdatabase.oc1..xxxxxxEXAMPLExxxxxx"
action: activate
- name: Perform action change_compartment on target_database
oci_data_safe_target_database_actions:
# required
target_database_id: "ocid1.targetdatabase.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id: "ocid1.compartment.oc1..aaaaaaaaqkb3jtub4lnlha7vfvp4f2mj6zxraqfnxernheayom3pljsfppca"
action: change_compartment
- name: Perform action deactivate on target_database
oci_data_safe_target_database_actions:
# required
target_database_id: "ocid1.targetdatabase.oc1..xxxxxxEXAMPLExxxxxx"
action: deactivate
- name: Perform action download_privilege_script on target_database
oci_data_safe_target_database_actions:
# required
dest: /tmp/myfile
action: download_privilege_script
"""
RETURN = """
target_database:
description:
- Details of the TargetDatabase resource acted upon by the current operation
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment which contains the Data Safe target database.
returned: on success
type: str
sample: "ocid1.compartment.oc1..aaaaaaaaqkb3jtub4lnlha7vfvp4f2mj6zxraqfnxernheayom3pljsfppca"
id:
description:
- The OCID of the Data Safe target database.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
display_name:
description:
- The display name of the target database in Data Safe.
returned: on success
type: str
sample: display_name_example
description:
description:
- The description of the target database in Data Safe.
returned: on success
type: str
sample: description_example
database_details:
description:
- ""
returned: on success
type: complex
contains:
database_type:
description:
- The database type.
returned: on success
type: str
sample: DATABASE_CLOUD_SERVICE
infrastructure_type:
description:
- The infrastructure type the database is running on.
returned: on success
type: str
sample: ORACLE_CLOUD
autonomous_database_id:
description:
- The OCID of the autonomous database registered as a target database in Data Safe.
returned: on success
type: str
sample: ocid1.autonomousdatabase.oc1.iad.abuwcljs4hz5fnsyazyoblbm3o24ykw5iiwa44it2jzvhby73r6324522c2q
vm_cluster_id:
description:
- The OCID of the VM cluster in which the database is running.
returned: on success
type: str
sample: ocid1.vmcluster.oc1.iad.abuwc33era5gex4puvmuphzxrfukxaxwqit7ze6egqquxe75ojb3lytlvtkq
db_system_id:
description:
- The OCID of the cloud database system registered as a target database in Data Safe.
returned: on success
type: str
sample: ocid1.dbsystem.oc1.iad.abuwcljsgz57qgikkkpzenfya3uznj356vwjcgqrcy6x2dzcgierhbsonz7q
service_name:
description:
- The database service name.
returned: on success
type: str
sample: service_name_example
instance_id:
description:
- The OCID of the compute instance on which the database is running.
returned: on success
type: str
sample: ocid1.instance.oc1.iad.anuwcljsgn3s2facu6dvc6uzhykv3uj56l5zevbob7dli4pqntjbq5343f2bq
ip_addresses:
description:
- The list of database host IP Addresses. Fully qualified domain names can be used if connectionType is 'ONPREM_CONNECTOR'.
returned: on success
type: list
sample: []
listener_port:
description:
- The port number of the database listener.
returned: on success
type: int
sample: 56
credentials:
description:
- ""
returned: on success
type: complex
contains:
user_name:
description:
- The database user name.
returned: on success
type: str
sample: user_name_example
password:
description:
- The password of the database user.
returned: on success
type: str
sample: example-password
tls_config:
description:
- ""
returned: on success
type: complex
contains:
status:
description:
- Status to represent whether the database connection is TLS enabled or not.
returned: on success
type: str
sample: ENABLED
certificate_store_type:
description:
- The format of the certificate store.
returned: on success
type: str
sample: JKS
store_password:
description:
- The password to read the trust store and key store files, if they are password protected.
returned: on success
type: str
sample: example-password
trust_store_content:
description:
- Base64 encoded string of trust store file content.
returned: on success
type: str
sample: trust_store_content_example
key_store_content:
description:
- Base64 encoded string of key store file content.
returned: on success
type: str
sample: key_store_content_example
connection_option:
description:
- ""
returned: on success
type: complex
contains:
connection_type:
description:
- "The connection type used to connect to the database. Allowed values:
- PRIVATE_ENDPOINT - Represents connection through private endpoint in Data Safe.
- ONPREM_CONNECTOR - Represents connection through on-premises connector in Data Safe."
returned: on success
type: str
sample: PRIVATE_ENDPOINT
on_prem_connector_id:
description:
- The OCID of the on-premises connector.
returned: on success
type: str
sample: "ocid1.onpremconnector.oc1..xxxxxxEXAMPLExxxxxx"
datasafe_private_endpoint_id:
description:
- The OCID of the Data Safe private endpoint.
returned: on success
type: str
sample: "ocid1.datasafeprivateendpoint.oc1..xxxxxxEXAMPLExxxxxx"
lifecycle_state:
description:
- The current state of the target database in Data Safe.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- Details about the current state of the target database in Data Safe.
returned: on success
type: str
sample: lifecycle_details_example
time_created:
description:
- The date and time of target database registration and creation in Data Safe.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- The date and time of the target database update in Data Safe.
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
freeform_tags:
description:
- Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see
L(Resource Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
- "Example: `{\\"Department\\": \\"Finance\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see L(Resource
Tags,https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm)
- "Example: `{\\"Operations\\": {\\"CostCenter\\": \\"42\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "System tags for this resource. Each key is predefined and scoped to a namespace. For more information, see Resource Tags.
Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\": \\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: {
"compartment_id": "ocid1.compartment.oc1..aaaaaaaaqkb3jtub4lnlha7vfvp4f2mj6zxraqfnxernheayom3pljsfppca",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"display_name": "display_name_example",
"description": "description_example",
"database_details": {
"database_type": "DATABASE_CLOUD_SERVICE",
"infrastructure_type": "ORACLE_CLOUD",
"autonomous_database_id": "ocid1.autonomousdatabase.oc1.iad.abuwcljs4hz5fnsyazyoblbm3o24ykw5iiwa44it2jzvhby73r6324522c2q",
"vm_cluster_id": "ocid1.vmcluster.oc1.iad.abuwc33era5gex4puvmuphzxrfukxaxwqit7ze6egqquxe75ojb3lytlvtkq",
"db_system_id": "ocid1.dbsystem.oc1.iad.abuwcljsgz57qgikkkpzenfya3uznj356vwjcgqrcy6x2dzcgierhbsonz7q",
"service_name": "service_name_example",
"instance_id": "ocid1.instance.oc1.iad.anuwcljsgn3s2facu6dvc6uzhykv3uj56l5zevbob7dli4pqntjbq5343f2bq",
"ip_addresses": [],
"listener_port": 56
},
"credentials": {
"user_name": "user_name_example",
"password": "example-password"
},
"tls_config": {
"status": "ENABLED",
"certificate_store_type": "JKS",
"store_password": "example-password",
"trust_store_content": "trust_store_content_example",
"key_store_content": "key_store_content_example"
},
"connection_option": {
"connection_type": "PRIVATE_ENDPOINT",
"on_prem_connector_id": "ocid1.onpremconnector.oc1..xxxxxxEXAMPLExxxxxx",
"datasafe_private_endpoint_id": "ocid1.datasafeprivateendpoint.oc1..xxxxxxEXAMPLExxxxxx"
},
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIActionsHelperBase,
get_custom_class,
)
try:
from oci.data_safe import DataSafeClient
from oci.data_safe.models import ActivateTargetDatabaseDetails
from oci.data_safe.models import ChangeTargetDatabaseCompartmentDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DataSafeTargetDatabaseActionsHelperGen(OCIActionsHelperBase):
"""
Supported actions:
activate
change_compartment
deactivate
download_privilege_script
"""
def get_get_fn(self):
return self.client.get_target_database
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_target_database,
target_database_id=self.module.params.get("target_database_id"),
)
def activate(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ActivateTargetDatabaseDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.activate_target_database,
call_fn_args=(),
call_fn_kwargs=dict(
activate_target_database_details=action_details,
target_database_id=self.module.params.get("target_database_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def change_compartment(self):
action_details = oci_common_utils.convert_input_data_to_model_class(
self.module.params, ChangeTargetDatabaseCompartmentDetails
)
return oci_wait_utils.call_and_wait(
call_fn=self.client.change_target_database_compartment,
call_fn_args=(),
call_fn_kwargs=dict(
target_database_id=self.module.params.get("target_database_id"),
change_target_database_compartment_details=action_details,
),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
def | (self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.deactivate_target_database,
call_fn_args=(),
call_fn_kwargs=dict(
target_database_id=self.module.params.get("target_database_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def download_privilege_script(self):
response = oci_wait_utils.call_and_wait(
call_fn=self.client.download_privilege_script,
call_fn_args=(),
call_fn_kwargs=dict(),
waiter_type=oci_wait_utils.NONE_WAITER_KEY,
operation="{0}_{1}".format(
self.module.params.get("action").upper(),
oci_common_utils.ACTION_OPERATION_KEY,
),
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=self.get_action_desired_states(
self.module.params.get("action")
),
)
dest = self.module.params.get("dest")
chunk_size = oci_common_utils.MEBIBYTE
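        # Stream the script to disk one mebibyte at a time so the whole
        # payload never has to be held in memory.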
with open(to_bytes(dest), "wb") as dest_file:
for chunk in response.raw.stream(chunk_size, decode_content=True):
dest_file.write(chunk)
return None
DataSafeTargetDatabaseActionsHelperCustom = get_custom_class(
"DataSafeTargetDatabaseActionsHelperCustom"
)
class ResourceHelper(
DataSafeTargetDatabaseActionsHelperCustom, DataSafeTargetDatabaseActionsHelperGen
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=False, supports_wait=True
)
module_args.update(
dict(
credentials=dict(
type="dict",
options=dict(
user_name=dict(type="str", required=True),
password=dict(type="str", required=True, no_log=True),
),
),
target_database_id=dict(type="str"),
compartment_id=dict(type="str"),
dest=dict(type="str"),
action=dict(
type="str",
required=True,
choices=[
"activate",
"change_compartment",
"deactivate",
"download_privilege_script",
],
),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="target_database",
service_client_class=DataSafeClient,
namespace="data_safe",
)
result = resource_helper.perform_action(module.params.get("action"))
module.exit_json(**result)
if __name__ == "__main__":
main()
| deactivate |
deco.service.ts | import {Inject, Injectable} from '@angular/core';
import {FileLoaderService} from '@d78ng/file-loader';
import {AnalysisResult, TK_AFTER_PREPOSITION, TK_CONJUNCTION, TK_CONNECTED, TK_EMPTY_LINE, TK_PREPOSITION, TK_PUNCTUATION, Token} from './model/analysis-result';
import {DATA_DICT, DATA_DICTP} from '../../../../src/app/app.tokens';
@Injectable({
providedIn: 'root'
})
export class DecoService {
private dictionary: Map<string, string[]> = new Map<string, string[]>();
private dictionary_prepositions: Map<string, Map<string, string[]>> = new Map<string, Map<string, string[]>>();
private used: string[] = [];
private replacements: Map<string, string> = new Map<string, string>();
constructor(@Inject(DATA_DICT) private dict: FileLoaderService, @Inject(DATA_DICTP) private dictp: FileLoaderService) {
for (const syl of Object.keys(dict.data)) {
this.dictionary.set(syl, dict.data[syl]);
}
for (const prep of Object.keys(dictp.data)) {
const preposition = dictp.data[prep];
const map: Map<string, string[]> = new Map<string, string[]>();
for (const syl of Object.keys(preposition)) {
map.set(syl, preposition[syl]);
}
this.dictionary_prepositions.set(prep, map);
}
}
async decorate(analysis: AnalysisResult): Promise<string> {
return new Promise<string>((resolve, reject) => {
this.replacements.clear();
for (const line of analysis.lines) {
const significantTokens: Token[] = [];
for (const token of line.tokens) {
if ((token.kind !== TK_PUNCTUATION) && (token.kind !== TK_EMPTY_LINE) && (token.kind !== TK_CONJUNCTION) && (token.kind !== TK_PREPOSITION)) {
token.decorate = (token.kind === TK_CONNECTED);
significantTokens.push(token);
}
}
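      // Heuristic: decorate every 5th significant token for English text,
      // otherwise every 4th (lines longer than 7 tokens) or every 3rd.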
const step = (analysis.english ? 5 : (significantTokens.length > 7) ? 4 : 3);
for (let i = significantTokens.length-2; i >= 0; i -= step) {
significantTokens[i].decorate = true;
}
}
let result = '';
for (const line of analysis.lines) {
let first = true;
for (const token of line.tokens) {
if ((!first) && (token.kind !== TK_PUNCTUATION)) {
result += ' ';
}
if (token.kind === TK_AFTER_PREPOSITION) {
result += (token.decorate) ? this.replaceTokenByPreposition(token) : token.text;
} else {
result += (token.decorate) ? this.replaceToken(token) : token.text;
}
first = false;
}
result += '\n';
}
resolve(result);
});
}
replaceToken(t: Token): string {
if ((t.syllables != null) && (this.dictionary.has(t.syllables))) {
return this.replaceTokenFromArray(t, this.dictionary.get(t.syllables), 0);
}
return t.text;
}
replaceTokenByPreposition(t: Token): string {
if ((t.type != null) && (this.dictionary_prepositions.has(t.type))) {
const dict: Map<string, string[]> = this.dictionary_prepositions.get(t.type);
if ((t.syllables != null) && (dict.has(t.syllables))) {
return this.replaceTokenFromArray(t, dict.get(t.syllables), 0);
}
}
return this.replaceToken(t);
}
replaceTokenFromArray(t: Token, arr: string[], depth: number): string {
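    // Reuses the cached replacement for a word when one exists; otherwise picks
    // a random candidate from arr, re-rolling up to 3 times to avoid recently
    // used replacements before falling back to the original text.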
const len = arr.length - 1;
const textlc = t.text.toLowerCase();
let replacement = '';
if (this.replacements.has(textlc)) {
replacement = this.replacements.get(textlc);
} else {
replacement = (len > 1) ? arr[Math.round((Math.random() * len))] : arr[0];
if (this.used.includes(replacement)) {
replacement = (depth < 3) ? this.replaceTokenFromArray(t, arr, depth+1) : t.text;
} else {
this.replacements.set(textlc, replacement);
this.used.push(replacement);
}
}
if (this.used.length > 5) {
this.used.splice(0, 1);
}
if (t.sentenceStart) { | }
return replacement;
}
} | replacement = replacement.charAt(0).toUpperCase() + replacement.slice(1); |
Welcome.js | import React, { useState, useEffect } from 'react';
import './Welcome.css';
export default function Welcome() {
    const [welcomeBox, setWelcomeBox] = useState(null);
| var welcomePop = document.getElementById("welcome");
        setWelcomeBox(welcomePop);
// Set the window to open automatically on load
// welcomePop.style.display = 'block';
}, [])
// Close the welcome window on button click
// @TODO: Close window on outside click
const closeWindow = () => {
welcomeBox.style.display = 'none';
};
return (
<div className="welcome-popup popup-bg" id="welcome">
{/* Welcome Popup */}
<div id="popup-box">
<div className="close-section">
{/* X close button */}
<div
className="close"
id="close-btn"
onClick={closeWindow}
>
<p>+</p>
</div>
</div>
{/* <div className="logo-img">
<img src="img/painting-palette-icon.png" alt="" />
</div> */}
<div className="popup-text">
<h1>Hi! Ready to get creative?</h1>
<p>Check this stuff out!</p>
</div>
<div className="popup-close">
<button
id="start-drawing-btn"
onClick={closeWindow}>
<h3 className="btn">No! I want to start drawing!</h3>
</button>
</div>
</div>
</div>
)
} | useEffect( () => {
// Get all the buttons and save their state |
test_distributions.py | import numpy as np
import scipy.integrate
from .. import distributions as D
np.random.seed(1)
def | (distr, left=None, right=None, bounds=None):
# check that mean and stddev from the generated sample
# match what we get from integrating the PDF
def FF1(x):
return distr.pdf(x) * x
def FF2(x):
return distr.pdf(x) * x**2
if left is None:
left = 0
if right is None:
right = np.inf
if bounds is None:
mom1, _ = scipy.integrate.quad(FF1, left, right)
mom2, _ = scipy.integrate.quad(FF2, left, right)
else:
mom1, mom2 = 0, 0
for curb in bounds:
cmom1, _ = scipy.integrate.quad(FF1, curb[0], curb[1])
cmom2, _ = scipy.integrate.quad(FF2, curb[0], curb[1])
mom1 += cmom1
mom2 += cmom2
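    # Var(X) = E[X^2] - (E[X])^2, so the model's standard deviation follows
    # from the first two moments of the PDF.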
std = np.sqrt(mom2 - mom1**2)
assert (mom2 > mom1**2)
N = int(1e6)
samps = distr.rvs(N)
    assert (np.abs(samps.mean() - mom1) < 5 * std / np.sqrt(N))
    assert (np.abs(samps.std() - std) < 20 * std / np.sqrt(2 * (N - 1)))
def ppftest(distr):
# test that ppf is inverse of cdf
xs = np.random.uniform(0, 1, size=100)
eps = 1e-5
assert (np.all(np.abs(distr.cdf(distr.ppf(xs)) - xs) < eps))
# test on scalar
assert (np.abs(distr.cdf(distr.ppf(xs[0])) - xs[0]) < eps)
assert (np.isnan(distr.ppf(-0.1)))
assert (np.isnan(distr.ppf(1.1)))
def test_lognorm():
ln = D.LogNormal(1, 1)
ln.pdf(1.)
ln.cdf(1)
ln.rvs(1000)
ppftest(ln)
sampltest(ln)
for i in range(10):
N = 100000
mean = np.random.uniform(0.1, 10)
sig = np.random.uniform(0.1, 10)
ln2 = D.LogNormal(mean, sig)
samp = ln2.rvs(N)
# check that the means and sigmas are correct
assert (np.abs(np.log(samp).mean() - np.log(mean)) < 0.01 * sig)
assert (np.abs(np.log(samp).std() - sig) < 0.01 * sig)
def test_broken_plaw():
ln = D.BrokenPowerLaw([-2, -1.1, -3], [0.1, 1, 2, 100])
ln.pdf(1.)
ln.cdf(1)
ln.rvs(1000)
ppftest(ln)
sampltest(ln, 0.05, 120, bounds=[[0.05, 1], [1, 2], [2, 120]])
# test values in each range
assert (np.abs(ln.ppf(ln.cdf(0.5)) - 0.5) < 1e-5)
assert (np.abs(ln.ppf(ln.cdf(1.5)) - 1.5) < 1e-5)
assert (np.abs(ln.ppf(ln.cdf(2.5)) - 2.5) < 1e-5)
def test_distr():
ln = D.TruncatedLogNormal(1, 1, 2, 3)
ln.pdf(1.)
ln.cdf(1)
ln.rvs(1000)
ppftest(ln)
sampltest(ln, 1, 4)
ln = D.PowerLaw(-2, 2, 6)
ln.pdf(1.)
ln.cdf(1)
ln.rvs(1000)
ppftest(ln)
sampltest(ln, 1, 7)
def test_composite():
ln = D.CompositeDistribution([
D.TruncatedLogNormal(1, 1, 2, 3),
D.PowerLaw(-2, 3, 4),
D.TruncatedLogNormal(1, 1, 4, 5),
D.PowerLaw(-3.5, 5, np.inf)
])
ln.pdf(2.5)
ln.cdf(2.5)
ln.rvs(1000)
ppftest(ln)
# test values in each break
assert (np.abs(ln.ppf(ln.cdf(2.5)) - 2.5) < 1e-5)
assert (np.abs(ln.ppf(ln.cdf(3.5)) - 3.5) < 1e-5)
assert (np.abs(ln.ppf(ln.cdf(4.5)) - 4.5) < 1e-5)
assert (np.abs(ln.ppf(ln.cdf(5.5)) - 5.5) < 1e-5)
sampltest(ln, 1, np.inf, bounds=[[1, 3], [3, 4], [4, 5], [5, np.inf]])
def test_bounds():
left, right = 1, 2
tleft, tright = 0.5, 3
ln = D.TruncatedLogNormal(1, 1, left, right)
assert (ln.pdf(tleft) == 0)
assert (ln.pdf(tright) == 0)
assert (ln.cdf(tleft) == 0)
assert (ln.cdf(tright) == 1)
ln = D.PowerLaw(-3, left, right)
assert (ln.pdf(tleft) == 0)
assert (ln.pdf(tright) == 0)
assert (ln.cdf(tleft) == 0)
assert (ln.cdf(tright) == 1)
ln = D.BrokenPowerLaw(
[-2, -1.1, -3],
[left, .6 * left + .3 * right, .3 * left + .6 * right, right])
assert (ln.pdf(tleft) == 0)
assert (ln.pdf(tright) == 0)
assert (ln.cdf(tleft) == 0)
assert (ln.cdf(tright) == 1)
ln = D.CompositeDistribution([
D.TruncatedLogNormal(1, 1, left, .75 * left + .25 * right),
D.PowerLaw(-2, .75 * left + .25 * right, .5 * left + .5 * right),
D.TruncatedLogNormal(1, 1, .5 * left + .5 * right,
.25 * left + .75 * right),
D.PowerLaw(-2, .25 * left + .75 * right, right)
])
assert (ln.pdf(tleft) == 0)
assert (ln.pdf(tright) == 0)
assert (ln.cdf(tleft) == 0)
assert (ln.cdf(tright) == 1)
def integralcheck(distr, left, x, val):
I, EI = scipy.integrate.quad(lambda y: distr.pdf(y), left, x)
assert (np.abs(val - I) < 1e-6)
def integralcheck_many(distr, left, right):
integralcheck(distr, left, right, 1)
N = 100
xs = np.random.uniform(left, right, size=N)
for x in xs:
integralcheck(distr, left, x, distr.cdf(x))
def test_integral():
# test that the numerically integrated pdf is within 3 sigma of 1
# for different kind of pdfs
left, right = 2, 3
distrs = [
D.TruncatedLogNormal(1, 1, left, right),
D.PowerLaw(-2, left, right),
D.BrokenPowerLaw([-2, -1.1, -3],
[left, .6 * left + .3 * right, .3 * left + .6 * right, right]),
D.CompositeDistribution([
D.TruncatedLogNormal(1, 1, left, .75 * left + .25 * right),
D.PowerLaw(-2, .75 * left + .25 * right, .5 * left + .5 * right),
D.TruncatedLogNormal(1, 1,
.5 * left + .5 * right,
.25 * left + .75 * right),
D.PowerLaw(-2, .25 * left + .75 * right, right)]
)
]
for curd in distrs:
integralcheck_many(curd, left, right)
| sampltest |
_family.py | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name='family',
parent_name='mesh3d.colorbar.tickfont',
**kwargs
):
| super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
no_blank=kwargs.pop('no_blank', True),
role=kwargs.pop('role', 'style'),
strict=kwargs.pop('strict', True),
**kwargs
) |
|
tests.py | import json
from datetime import datetime, timedelta
from app import app, db
from app.utils.testing import ApiTestCase
from app.users.models import AppUser, PasswordReset, UserCategory, Country, UserComment
from app.events.models import Event, EventRole
from app.applicationModel.models import ApplicationForm
from app.responses.models import Response
USER_DATA = {
'email': '[email protected]',
'firstname': 'Some',
'lastname': 'Thing',
'user_title': 'Mr',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Male',
'affiliation': 'University',
'department': 'Computer Science',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password': '123456'
}
AUTH_DATA = {
'email': '[email protected]',
'password': '123456'
}
class UserApiTest(ApiTestCase):
def seed_static_data(self):
db.session.add(UserCategory('Postdoc'))
db.session.add(Country('South Africa'))
self.event1 = Event('Indaba', 'Indaba Event',
datetime.now(), datetime.now())
self.event2 = Event('IndabaX', 'IndabaX Sudan',
datetime.now(), datetime.now())
db.session.add(self.event1)
db.session.add(self.event2)
db.session.commit()
self.event1_id = self.event1.id
self.event2_id = self.event2.id
db.session.flush()
def get_auth_header_for(self, email):
body = {
'email': email,
'password': 'abc'
}
response = self.app.post('api/v1/authenticate', data=body)
data = json.loads(response.data)
header = {'Authorization': data['token']}
return header
def test_registration(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert data['id'] == 1
assert len(data['token']) > 10
def test_duplicate_registration(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 201
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 409
def test_get_user(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
headers = {'Authorization': data['token']}
response = self.app.get('/api/v1/user', headers=headers)
data = json.loads(response.data)
assert data['email'] == '[email protected]'
assert data['firstname'] == 'Some'
assert data['lastname'] == 'Thing'
assert data['user_title'] == 'Mr'
assert data['nationality_country'] == 'South Africa'
assert data['residence_country'] == 'South Africa'
assert data['user_gender'] == 'Male'
assert data['affiliation'] == 'University'
assert data['department'] == 'Computer Science'
assert data['user_disability'] == 'None'
assert data['user_category'] == 'Postdoc'
assert data['user_primaryLanguage'] == 'Zulu'
assert data['user_dateOfBirth'] == datetime(
1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S')
def test_update_user(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
headers = {'Authorization': data['token']}
response = self.app.put('/api/v1/user', headers=headers, data={
'email': '[email protected]',
'firstname': 'Updated',
'lastname': 'Updated',
'user_title': 'Mrs',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Female',
'affiliation': 'Company',
'department': 'AI',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12, 0, 0, 0).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password': ''
})
assert response.status_code == 200
response = self.app.get('/api/v1/user', headers=headers)
data = json.loads(response.data)
assert data['email'] == '[email protected]'
assert data['firstname'] == 'Updated'
assert data['lastname'] == 'Updated'
assert data['user_title'] == 'Mrs'
assert data['nationality_country'] == 'South Africa'
assert data['residence_country'] == 'South Africa'
assert data['user_gender'] == 'Female'
assert data['affiliation'] == 'Company'
assert data['department'] == 'AI'
assert data['user_disability'] == 'None'
assert data['user_category'] == 'Postdoc'
assert data['user_primaryLanguage'] == 'Zulu'
assert data['user_dateOfBirth'] == datetime(
1984, 12, 12, 0, 0, 0, 0).strftime('%Y-%m-%dT%H:%M:%S')
def test_authentication_deleted(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
headers = {'Authorization': data['token']}
response = self.app.delete('api/v1/user', headers=headers)
assert response.status_code == 200
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 404
def test_authentication_unverified_email(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 422
def test_authentication_wrong_password(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data={
'email': '[email protected]',
'password': 'wrong'
})
assert response.status_code == 401
def test_authentication(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
assert response.status_code == 200
def test_authentication_response(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
role1 = EventRole('admin', data['id'], self.event1_id)
role2 = EventRole('reviewer', data['id'], self.event2_id)
db.session.add(role1)
db.session.add(role2)
db.session.commit()
db.session.flush()
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
data = json.loads(response.data)
self.assertEqual(data['firstname'], USER_DATA['firstname'])
self.assertEqual(data['lastname'], USER_DATA['lastname'])
self.assertEqual(data['title'], USER_DATA['user_title'])
self.assertEqual(data['roles'], [
{'event_id': self.event1_id, 'role': 'admin'},
{'event_id': self.event2_id, 'role': 'reviewer'},
])
def test_password_reset_user_does_not_exist(self):
response = self.app.post('/api/v1/password-reset/request', data={
'email': '[email protected]'
})
assert response.status_code == 404
def test_password_reset_expired(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/password-reset/request', data={
'email': '[email protected]'
})
assert response.status_code == 201
pw_reset = db.session.query(PasswordReset).first()
pw_reset.date = datetime.now() - timedelta(days=2)
db.session.commit()
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': pw_reset.code,
'password': 'abc123'
})
assert response.status_code == 400
def test_password_reset(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
assert response.status_code == 201
user = db.session.query(AppUser).filter(
AppUser.id == data['id']).first()
response = self.app.get(
'/api/v1/verify-email?token='+user.verify_token)
assert response.status_code == 201
response = self.app.post('/api/v1/password-reset/request', data={
'email': '[email protected]'
})
assert response.status_code == 201
pw_reset = db.session.query(PasswordReset).first()
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': "bad code",
'password': 'abc123'
})
assert response.status_code == 418
response = self.app.post('/api/v1/password-reset/confirm', data={
'code': pw_reset.code,
'password': 'abc123'
})
assert response.status_code == 201
response = self.app.post('/api/v1/authenticate', data={
'email': '[email protected]',
'password': 'abc123'
})
assert response.status_code == 200
def test_deletion(self):
self.seed_static_data()
response = self.app.post('/api/v1/user', data=USER_DATA)
data = json.loads(response.data)
user_id = data['id']
headers = {'Authorization': data['token']}
response = self.app.delete('/api/v1/user', headers=headers)
assert response.status_code == 200
user = db.session.query(AppUser).filter(AppUser.id == user_id).first()
assert user.email == '[email protected]'
assert user.is_deleted == True
def test_resend_verification_email(self):
self.seed_static_data()
self.app.post('/api/v1/user', data=USER_DATA)
response = self.app.get(
'/api/v1/resend-verification-email?email={}'.format(USER_DATA['email']))
assert response.status_code == 201
def test_resend_verification_email_no_user(self):
self.seed_static_data()
response = self.app.get(
'/api/v1/resend-verification-email?email={}'.format('[email protected]'))
assert response.status_code == 404
def setup_verified_user(self):
user = AppUser('[email protected]', 'Some', 'Thing', 'Mr',
1, 1, 'Male', 'University', 'Computer Science',
'None', 1, datetime(1984, 12, 12),
'English', '123456')
user.verify_token = 'existing token'
user.verify()
db.session.add(user)
db.session.commit()
def test_email_change_gets_new_token_and_is_unverified(self):
self.seed_static_data()
self.setup_verified_user()
response = self.app.post('/api/v1/authenticate', data=AUTH_DATA)
data = json.loads(response.data)
headers = {'Authorization': data['token']}
response = self.app.put('/api/v1/user', headers=headers, data={
'email': '[email protected]',
'firstname': 'Some',
'lastname': 'Thing',
'user_title': 'Mr',
'nationality_country_id': 1,
'residence_country_id': 1,
'user_gender': 'Male',
'affiliation': 'University',
'department': 'Computer Science',
'user_disability': 'None',
'user_category_id': 1,
'user_primaryLanguage': 'Zulu',
'user_dateOfBirth': datetime(1984, 12, 12).strftime('%Y-%m-%dT%H:%M:%S.%fZ'),
'password':''
})
self.assertEqual(response.status_code, 200)
user = db.session.query(AppUser).get(1)
self.assertEqual(user.email, '[email protected]')
self.assertEqual(user.firstname, 'Some')
self.assertEqual(user.lastname, 'Thing')
self.assertEqual(user.user_title, 'Mr')
self.assertEqual(user.nationality_country_id, 1)
self.assertEqual(user.residence_country_id, 1)
self.assertEqual(user.user_gender, 'Male')
self.assertEqual(user.affiliation, 'University')
self.assertEqual(user.department, 'Computer Science')
self.assertEqual(user.user_disability, 'None')
self.assertEqual(user.user_category_id, 1)
self.assertEqual(user.user_primaryLanguage, 'Zulu')
self.assertEqual(user.user_dateOfBirth, datetime(1984, 12, 12))
self.assertEqual(user.verified_email, False)
self.assertNotEqual(user.verify_token, 'existing token')
def setup_responses(self):
application_forms = [
ApplicationForm(1, True, datetime(2019, 4, 12)),
ApplicationForm(2, False, datetime(2019, 4, 12))
]
db.session.add_all(application_forms)
candidate1 = AppUser('[email protected]', 'candidate', '1', 'Mr', 1, 1, 'M', 'UWC', 'CS', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
candidate2 = AppUser('[email protected]', 'candidate', '2', 'Ms', 1, 1, 'F', 'RU', 'Chem', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
candidate3 = AppUser('[email protected]', 'candidate', '3', 'Mr', 1, 1, 'M', 'UFH', 'Phys', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
event_admin = AppUser('[email protected]', 'event_admin', '1', 'Ms', 1, 1, 'F', 'NWU', 'Math', 'NA', 1, datetime(1984, 12, 12), 'Eng', 'abc')
users = [candidate1, candidate2, candidate3, event_admin]
for user in users:
user.verify()
db.session.add_all(users)
event_role = EventRole('admin', 4, 1)
db.session.add(event_role)
responses = [
Response(1, 1, True, datetime(2019, 4, 10)),
Response(1, 2, True, datetime(2019, 4, 9), True, datetime(2019, 4, 11)),
Response(2, 3, True)
]
db.session.add_all(responses)
db.session.commit()
def test_user_profile_list(self):
self.seed_static_data()
self.setup_responses()
header = self.get_auth_header_for('[email protected]')
params = {'event_id': 1}
response = self.app.get('/api/v1/userprofilelist', headers=header, data=params)
data = json.loads(response.data)
data = sorted(data, key=lambda k: k['user_id'])
self.assertEqual(len(data), 2)
self.assertEqual(data[0]['user_id'], 1)
self.assertEqual(data[0]['is_submitted'], True)
self.assertEqual(data[0]['submitted_timestamp'], u'2019-04-10T00:00:00')
self.assertEqual(data[0]['is_withdrawn'], False)
self.assertEqual(data[0]['withdrawn_timestamp'], None)
self.assertEqual(data[1]['user_id'], 2)
self.assertEqual(data[1]['is_submitted'], True)
self.assertEqual(data[1]['submitted_timestamp'], u'2019-04-09T00:00:00')
self.assertEqual(data[1]['is_withdrawn'], True)
self.assertEqual(data[1]['withdrawn_timestamp'], u'2019-04-11T00:00:00')
class UserCommentAPITest(ApiTestCase):
def seed_static_data(self):
db.session.add(UserCategory('Postdoc'))
db.session.add(Country('South Africa'))
self.event1 = Event('Indaba', 'Indaba Event',
datetime.now(), datetime.now())
db.session.add(self.event1)
db.session.commit()
self.event1_id = self.event1.id
user_data1 = USER_DATA.copy()
response = self.app.post('/api/v1/user', data=user_data1)
self.user1 = json.loads(response.data) | user_data2 = USER_DATA.copy()
user_data2['email'] = '[email protected]'
user_data2['firstname'] = 'Person'
user_data2['lastname'] = 'Two'
response = self.app.post('/api/v1/user', data=user_data2)
self.user2 = json.loads(response.data)
user2 = db.session.query(AppUser).filter(AppUser.email == '[email protected]').first()
user2.is_admin = True
db.session.flush()
def seed_comments(self):
self.comment1 = UserComment(self.event1_id, self.user1['id'], self.user2['id'], datetime.now(), 'Comment 1')
self.comment2 = UserComment(self.event1_id, self.user1['id'], self.user2['id'], datetime.now(), 'Comment 2')
self.comment3 = UserComment(self.event1_id, self.user2['id'], self.user1['id'], datetime.now(), 'Comment 3')
db.session.add_all([self.comment1, self.comment2, self.comment3])
db.session.flush()
def test_post_comment(self):
with app.app_context():
self.seed_static_data()
params = {'event_id': self.event1_id, 'user_id': self.user2['id'], 'comment': 'Comment1'}
print('Sending params: ', params)
response = self.app.post('/api/v1/user-comment', headers={'Authorization': self.user1['token']}, data=json.dumps(params), content_type='application/json')
data = json.loads(response.data)
self.assertEqual(response.status_code, 201)
def test_get_forbidden(self):
with app.app_context():
self.seed_static_data()
self.seed_comments()
params = {'event_id': self.event1_id, 'user_id': self.user2['id']}
response = self.app.get('/api/v1/user-comment', headers={'Authorization': self.user1['token']}, query_string=params)
self.assertEqual(response.status_code, 403)
def test_get_comments(self):
with app.app_context():
self.seed_static_data()
self.seed_comments()
params = {'event_id': self.event1_id, 'user_id': self.user1['id']}
response = self.app.get('/api/v1/user-comment', headers={'Authorization': self.user2['token']}, query_string=params)
comment_list = json.loads(response.data)
self.assertEqual(len(comment_list), 2)
self.assertEqual(comment_list[0]['event_id'], self.comment1.event_id)
self.assertEqual(comment_list[0]['user_id'], self.comment1.user_id)
self.assertEqual(comment_list[0]['comment_by_user_firstname'], self.user2['firstname'])
self.assertEqual(comment_list[0]['comment_by_user_lastname'], self.user2['lastname'])
self.assertEqual(comment_list[0]['comment'], self.comment1.comment)
self.assertEqual(comment_list[1]['event_id'], self.comment2.event_id)
self.assertEqual(comment_list[1]['user_id'], self.comment2.user_id)
self.assertEqual(comment_list[1]['comment_by_user_firstname'], self.user2['firstname'])
self.assertEqual(comment_list[1]['comment_by_user_lastname'], self.user2['lastname'])
self.assertEqual(comment_list[1]['comment'], self.comment2.comment) | |
router.js | import EmberRouter from '@ember/routing/router';
import config from 'dummy/config/environment';
export default class | extends EmberRouter {
location = config.locationType;
rootURL = config.rootURL;
}
Router.map(function() {
});
| Router |
maxplus_20_83.py | from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_true, msat_make_false
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type
from mathsat import msat_make_and as _msat_make_and
from mathsat import msat_make_or as _msat_make_or
from mathsat import msat_make_not
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
def msat_make_and(menv: msat_env, *args):
if len(args) == 0:
return msat_make_true(menv)
if len(args) == 1:
return args[0]
res = _msat_make_and(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_and(menv, res, arg)
return res
def msat_make_or(menv: msat_env, *args):
if len(args) == 0:
return msat_make_false(menv)
if len(args) == 1:
return args[0]
res = _msat_make_or(menv, args[0], args[1])
for arg in args[2:]:
res = _msat_make_or(menv, res, arg)
return res
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
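    # Encodes arg0 - arg1 as arg0 + (-1 * arg1), since only plus and times
    # primitives are used here.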
n_m1 = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, n_m1)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
names = ["x_0", "x_1", "x_2", "x_3", "x_4", "x_5", "x_6", "x_7", "x_8", "x_9", "x_10", "x_11", "x_12", "x_13", "x_14", "x_15", "x_16", "x_17", "x_18", "x_19"]
xs = [msat_declare_function(menv, name, real_type)
for name in names]
xs = [msat_make_constant(menv, x) for x in xs]
x_xs = [msat_declare_function(menv, name_next(name), real_type)
for name in names]
x_xs = [msat_make_constant(menv, x_x) for x_x in x_xs]
curr2next = {x: x_x for x, x_x in zip(xs, x_xs)}
n_10_0 = msat_make_number(menv, "10.0")
n_11_0 = msat_make_number(menv, "11.0")
n_12_0 = msat_make_number(menv, "12.0")
n_13_0 = msat_make_number(menv, "13.0")
n_14_0 = msat_make_number(menv, "14.0")
n_15_0 = msat_make_number(menv, "15.0")
n_16_0 = msat_make_number(menv, "16.0")
n_17_0 = msat_make_number(menv, "17.0")
n_18_0 = msat_make_number(menv, "18.0")
n_19_0 = msat_make_number(menv, "19.0")
n_1_0 = msat_make_number(menv, "1.0")
n_20_0 = msat_make_number(menv, "20.0")
n_2_0 = msat_make_number(menv, "2.0")
n_3_0 = msat_make_number(menv, "3.0")
n_4_0 = msat_make_number(menv, "4.0")
n_5_0 = msat_make_number(menv, "5.0")
n_6_0 = msat_make_number(menv, "6.0")
n_7_0 = msat_make_number(menv, "7.0")
n_8_0 = msat_make_number(menv, "8.0")
n_9_0 = msat_make_number(menv, "9.0")
init = msat_make_true(menv)
trans = msat_make_true(menv)
# transitions
expr0 = msat_make_plus(menv, xs[0], n_8_0)
expr1 = msat_make_plus(menv, xs[4], n_9_0)
expr2 = msat_make_plus(menv, xs[5], n_9_0)
expr3 = msat_make_plus(menv, xs[7], n_12_0)
expr4 = msat_make_plus(menv, xs[11], n_20_0)
expr5 = msat_make_plus(menv, xs[12], n_15_0)
expr6 = msat_make_plus(menv, xs[14], n_12_0)
expr7 = msat_make_plus(menv, xs[15], n_5_0)
expr8 = msat_make_plus(menv, xs[18], n_1_0)
expr9 = msat_make_plus(menv, xs[19], n_5_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[0], expr0),
msat_make_geq(menv, x_xs[0], expr1),
msat_make_geq(menv, x_xs[0], expr2),
msat_make_geq(menv, x_xs[0], expr3),
msat_make_geq(menv, x_xs[0], expr4),
msat_make_geq(menv, x_xs[0], expr5),
msat_make_geq(menv, x_xs[0], expr6),
msat_make_geq(menv, x_xs[0], expr7),
msat_make_geq(menv, x_xs[0], expr8),
msat_make_geq(menv, x_xs[0], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[0], expr0),
msat_make_equal(menv, x_xs[0], expr1),
msat_make_equal(menv, x_xs[0], expr2),
msat_make_equal(menv, x_xs[0], expr3),
msat_make_equal(menv, x_xs[0], expr4),
msat_make_equal(menv, x_xs[0], expr5),
msat_make_equal(menv, x_xs[0], expr6),
msat_make_equal(menv, x_xs[0], expr7),
msat_make_equal(menv, x_xs[0], expr8),
msat_make_equal(menv, x_xs[0], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_20_0)
expr1 = msat_make_plus(menv, xs[4], n_16_0)
expr2 = msat_make_plus(menv, xs[5], n_17_0)
expr3 = msat_make_plus(menv, xs[6], n_6_0)
expr4 = msat_make_plus(menv, xs[8], n_19_0)
expr5 = msat_make_plus(menv, xs[11], n_13_0)
expr6 = msat_make_plus(menv, xs[12], n_1_0)
expr7 = msat_make_plus(menv, xs[15], n_5_0)
expr8 = msat_make_plus(menv, xs[16], n_1_0)
expr9 = msat_make_plus(menv, xs[18], n_15_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[1], expr0),
msat_make_geq(menv, x_xs[1], expr1),
msat_make_geq(menv, x_xs[1], expr2),
msat_make_geq(menv, x_xs[1], expr3),
msat_make_geq(menv, x_xs[1], expr4),
msat_make_geq(menv, x_xs[1], expr5),
msat_make_geq(menv, x_xs[1], expr6),
msat_make_geq(menv, x_xs[1], expr7),
msat_make_geq(menv, x_xs[1], expr8),
msat_make_geq(menv, x_xs[1], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[1], expr0),
msat_make_equal(menv, x_xs[1], expr1),
msat_make_equal(menv, x_xs[1], expr2),
msat_make_equal(menv, x_xs[1], expr3),
msat_make_equal(menv, x_xs[1], expr4),
msat_make_equal(menv, x_xs[1], expr5),
msat_make_equal(menv, x_xs[1], expr6),
msat_make_equal(menv, x_xs[1], expr7),
msat_make_equal(menv, x_xs[1], expr8),
msat_make_equal(menv, x_xs[1], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_4_0)
expr1 = msat_make_plus(menv, xs[1], n_1_0)
expr2 = msat_make_plus(menv, xs[3], n_12_0)
expr3 = msat_make_plus(menv, xs[5], n_18_0)
expr4 = msat_make_plus(menv, xs[7], n_13_0)
expr5 = msat_make_plus(menv, xs[8], n_12_0)
expr6 = msat_make_plus(menv, xs[14], n_17_0)
expr7 = msat_make_plus(menv, xs[16], n_14_0)
expr8 = msat_make_plus(menv, xs[17], n_1_0)
expr9 = msat_make_plus(menv, xs[19], n_16_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[2], expr0),
msat_make_geq(menv, x_xs[2], expr1),
msat_make_geq(menv, x_xs[2], expr2),
msat_make_geq(menv, x_xs[2], expr3),
msat_make_geq(menv, x_xs[2], expr4),
msat_make_geq(menv, x_xs[2], expr5),
msat_make_geq(menv, x_xs[2], expr6),
msat_make_geq(menv, x_xs[2], expr7),
msat_make_geq(menv, x_xs[2], expr8),
msat_make_geq(menv, x_xs[2], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[2], expr0),
msat_make_equal(menv, x_xs[2], expr1),
msat_make_equal(menv, x_xs[2], expr2),
msat_make_equal(menv, x_xs[2], expr3),
msat_make_equal(menv, x_xs[2], expr4),
msat_make_equal(menv, x_xs[2], expr5),
msat_make_equal(menv, x_xs[2], expr6),
msat_make_equal(menv, x_xs[2], expr7),
msat_make_equal(menv, x_xs[2], expr8),
msat_make_equal(menv, x_xs[2], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_9_0)
expr1 = msat_make_plus(menv, xs[3], n_17_0)
expr2 = msat_make_plus(menv, xs[5], n_2_0)
expr3 = msat_make_plus(menv, xs[7], n_5_0)
expr4 = msat_make_plus(menv, xs[13], n_20_0)
expr5 = msat_make_plus(menv, xs[15], n_4_0)
expr6 = msat_make_plus(menv, xs[16], n_20_0)
expr7 = msat_make_plus(menv, xs[17], n_7_0)
expr8 = msat_make_plus(menv, xs[18], n_11_0)
expr9 = msat_make_plus(menv, xs[19], n_3_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[3], expr0),
msat_make_geq(menv, x_xs[3], expr1),
msat_make_geq(menv, x_xs[3], expr2),
msat_make_geq(menv, x_xs[3], expr3),
msat_make_geq(menv, x_xs[3], expr4),
msat_make_geq(menv, x_xs[3], expr5),
msat_make_geq(menv, x_xs[3], expr6),
msat_make_geq(menv, x_xs[3], expr7),
msat_make_geq(menv, x_xs[3], expr8),
msat_make_geq(menv, x_xs[3], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[3], expr0),
msat_make_equal(menv, x_xs[3], expr1),
msat_make_equal(menv, x_xs[3], expr2),
msat_make_equal(menv, x_xs[3], expr3),
msat_make_equal(menv, x_xs[3], expr4),
msat_make_equal(menv, x_xs[3], expr5),
msat_make_equal(menv, x_xs[3], expr6),
msat_make_equal(menv, x_xs[3], expr7),
msat_make_equal(menv, x_xs[3], expr8),
msat_make_equal(menv, x_xs[3], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_14_0)
expr1 = msat_make_plus(menv, xs[2], n_2_0)
expr2 = msat_make_plus(menv, xs[4], n_13_0)
expr3 = msat_make_plus(menv, xs[5], n_4_0)
expr4 = msat_make_plus(menv, xs[6], n_5_0)
expr5 = msat_make_plus(menv, xs[10], n_17_0)
expr6 = msat_make_plus(menv, xs[12], n_16_0)
expr7 = msat_make_plus(menv, xs[14], n_15_0)
expr8 = msat_make_plus(menv, xs[15], n_15_0)
expr9 = msat_make_plus(menv, xs[18], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[4], expr0),
msat_make_geq(menv, x_xs[4], expr1),
msat_make_geq(menv, x_xs[4], expr2),
msat_make_geq(menv, x_xs[4], expr3),
msat_make_geq(menv, x_xs[4], expr4),
msat_make_geq(menv, x_xs[4], expr5),
msat_make_geq(menv, x_xs[4], expr6),
msat_make_geq(menv, x_xs[4], expr7),
msat_make_geq(menv, x_xs[4], expr8),
msat_make_geq(menv, x_xs[4], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[4], expr0),
msat_make_equal(menv, x_xs[4], expr1),
msat_make_equal(menv, x_xs[4], expr2),
msat_make_equal(menv, x_xs[4], expr3),
msat_make_equal(menv, x_xs[4], expr4),
msat_make_equal(menv, x_xs[4], expr5),
msat_make_equal(menv, x_xs[4], expr6),
msat_make_equal(menv, x_xs[4], expr7),
msat_make_equal(menv, x_xs[4], expr8),
msat_make_equal(menv, x_xs[4], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_17_0)
expr1 = msat_make_plus(menv, xs[4], n_2_0)
expr2 = msat_make_plus(menv, xs[5], n_18_0)
expr3 = msat_make_plus(menv, xs[6], n_17_0)
expr4 = msat_make_plus(menv, xs[8], n_20_0)
expr5 = msat_make_plus(menv, xs[10], n_7_0)
expr6 = msat_make_plus(menv, xs[14], n_2_0)
expr7 = msat_make_plus(menv, xs[16], n_19_0)
expr8 = msat_make_plus(menv, xs[17], n_12_0)
expr9 = msat_make_plus(menv, xs[18], n_13_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[5], expr0),
msat_make_geq(menv, x_xs[5], expr1),
msat_make_geq(menv, x_xs[5], expr2),
msat_make_geq(menv, x_xs[5], expr3),
msat_make_geq(menv, x_xs[5], expr4),
msat_make_geq(menv, x_xs[5], expr5),
msat_make_geq(menv, x_xs[5], expr6),
msat_make_geq(menv, x_xs[5], expr7),
msat_make_geq(menv, x_xs[5], expr8),
msat_make_geq(menv, x_xs[5], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[5], expr0),
msat_make_equal(menv, x_xs[5], expr1),
msat_make_equal(menv, x_xs[5], expr2),
msat_make_equal(menv, x_xs[5], expr3),
msat_make_equal(menv, x_xs[5], expr4),
msat_make_equal(menv, x_xs[5], expr5),
msat_make_equal(menv, x_xs[5], expr6),
msat_make_equal(menv, x_xs[5], expr7),
msat_make_equal(menv, x_xs[5], expr8),
msat_make_equal(menv, x_xs[5], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[2], n_8_0)
expr1 = msat_make_plus(menv, xs[3], n_2_0)
expr2 = msat_make_plus(menv, xs[5], n_13_0)
expr3 = msat_make_plus(menv, xs[9], n_15_0)
expr4 = msat_make_plus(menv, xs[11], n_12_0)
expr5 = msat_make_plus(menv, xs[13], n_2_0)
expr6 = msat_make_plus(menv, xs[14], n_18_0)
expr7 = msat_make_plus(menv, xs[16], n_17_0)
expr8 = msat_make_plus(menv, xs[17], n_7_0)
expr9 = msat_make_plus(menv, xs[18], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[6], expr0),
msat_make_geq(menv, x_xs[6], expr1),
msat_make_geq(menv, x_xs[6], expr2),
                       msat_make_geq(menv, x_xs[6], expr3),
                       msat_make_geq(menv, x_xs[6], expr4),
                       msat_make_geq(menv, x_xs[6], expr5),
                       msat_make_geq(menv, x_xs[6], expr6),
                       msat_make_geq(menv, x_xs[6], expr7),
msat_make_geq(menv, x_xs[6], expr8),
msat_make_geq(menv, x_xs[6], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[6], expr0),
msat_make_equal(menv, x_xs[6], expr1),
msat_make_equal(menv, x_xs[6], expr2),
msat_make_equal(menv, x_xs[6], expr3),
msat_make_equal(menv, x_xs[6], expr4),
msat_make_equal(menv, x_xs[6], expr5),
msat_make_equal(menv, x_xs[6], expr6),
msat_make_equal(menv, x_xs[6], expr7),
msat_make_equal(menv, x_xs[6], expr8),
msat_make_equal(menv, x_xs[6], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_3_0)
expr1 = msat_make_plus(menv, xs[3], n_11_0)
expr2 = msat_make_plus(menv, xs[5], n_12_0)
expr3 = msat_make_plus(menv, xs[7], n_3_0)
expr4 = msat_make_plus(menv, xs[10], n_5_0)
expr5 = msat_make_plus(menv, xs[11], n_5_0)
expr6 = msat_make_plus(menv, xs[14], n_5_0)
expr7 = msat_make_plus(menv, xs[17], n_20_0)
expr8 = msat_make_plus(menv, xs[18], n_14_0)
expr9 = msat_make_plus(menv, xs[19], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[7], expr0),
msat_make_geq(menv, x_xs[7], expr1),
msat_make_geq(menv, x_xs[7], expr2),
msat_make_geq(menv, x_xs[7], expr3),
msat_make_geq(menv, x_xs[7], expr4),
msat_make_geq(menv, x_xs[7], expr5),
msat_make_geq(menv, x_xs[7], expr6),
msat_make_geq(menv, x_xs[7], expr7),
msat_make_geq(menv, x_xs[7], expr8),
msat_make_geq(menv, x_xs[7], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[7], expr0),
msat_make_equal(menv, x_xs[7], expr1),
msat_make_equal(menv, x_xs[7], expr2),
msat_make_equal(menv, x_xs[7], expr3),
msat_make_equal(menv, x_xs[7], expr4),
msat_make_equal(menv, x_xs[7], expr5),
msat_make_equal(menv, x_xs[7], expr6),
msat_make_equal(menv, x_xs[7], expr7),
msat_make_equal(menv, x_xs[7], expr8),
msat_make_equal(menv, x_xs[7], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_15_0)
expr1 = msat_make_plus(menv, xs[3], n_9_0)
expr2 = msat_make_plus(menv, xs[5], n_4_0)
expr3 = msat_make_plus(menv, xs[6], n_16_0)
expr4 = msat_make_plus(menv, xs[9], n_3_0)
expr5 = msat_make_plus(menv, xs[10], n_18_0)
expr6 = msat_make_plus(menv, xs[12], n_1_0)
expr7 = msat_make_plus(menv, xs[16], n_7_0)
expr8 = msat_make_plus(menv, xs[17], n_14_0)
expr9 = msat_make_plus(menv, xs[19], n_10_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[8], expr0),
msat_make_geq(menv, x_xs[8], expr1),
msat_make_geq(menv, x_xs[8], expr2),
msat_make_geq(menv, x_xs[8], expr3),
msat_make_geq(menv, x_xs[8], expr4),
msat_make_geq(menv, x_xs[8], expr5),
msat_make_geq(menv, x_xs[8], expr6),
msat_make_geq(menv, x_xs[8], expr7),
msat_make_geq(menv, x_xs[8], expr8),
msat_make_geq(menv, x_xs[8], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[8], expr0),
msat_make_equal(menv, x_xs[8], expr1),
msat_make_equal(menv, x_xs[8], expr2),
msat_make_equal(menv, x_xs[8], expr3),
msat_make_equal(menv, x_xs[8], expr4),
msat_make_equal(menv, x_xs[8], expr5),
msat_make_equal(menv, x_xs[8], expr6),
msat_make_equal(menv, x_xs[8], expr7),
msat_make_equal(menv, x_xs[8], expr8),
msat_make_equal(menv, x_xs[8], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[3], n_5_0)
expr1 = msat_make_plus(menv, xs[4], n_4_0)
expr2 = msat_make_plus(menv, xs[5], n_19_0)
expr3 = msat_make_plus(menv, xs[6], n_9_0)
expr4 = msat_make_plus(menv, xs[10], n_5_0)
expr5 = msat_make_plus(menv, xs[12], n_12_0)
expr6 = msat_make_plus(menv, xs[14], n_7_0)
expr7 = msat_make_plus(menv, xs[15], n_12_0)
expr8 = msat_make_plus(menv, xs[16], n_20_0)
expr9 = msat_make_plus(menv, xs[17], n_3_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[9], expr0),
msat_make_geq(menv, x_xs[9], expr1),
msat_make_geq(menv, x_xs[9], expr2),
msat_make_geq(menv, x_xs[9], expr3),
msat_make_geq(menv, x_xs[9], expr4),
msat_make_geq(menv, x_xs[9], expr5),
msat_make_geq(menv, x_xs[9], expr6),
msat_make_geq(menv, x_xs[9], expr7),
msat_make_geq(menv, x_xs[9], expr8),
msat_make_geq(menv, x_xs[9], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[9], expr0),
msat_make_equal(menv, x_xs[9], expr1),
msat_make_equal(menv, x_xs[9], expr2),
msat_make_equal(menv, x_xs[9], expr3),
msat_make_equal(menv, x_xs[9], expr4),
msat_make_equal(menv, x_xs[9], expr5),
msat_make_equal(menv, x_xs[9], expr6),
msat_make_equal(menv, x_xs[9], expr7),
msat_make_equal(menv, x_xs[9], expr8),
msat_make_equal(menv, x_xs[9], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_20_0)
expr1 = msat_make_plus(menv, xs[3], n_12_0)
expr2 = msat_make_plus(menv, xs[6], n_18_0)
expr3 = msat_make_plus(menv, xs[8], n_8_0)
expr4 = msat_make_plus(menv, xs[9], n_8_0)
expr5 = msat_make_plus(menv, xs[10], n_2_0)
expr6 = msat_make_plus(menv, xs[11], n_16_0)
expr7 = msat_make_plus(menv, xs[16], n_18_0)
expr8 = msat_make_plus(menv, xs[17], n_20_0)
expr9 = msat_make_plus(menv, xs[19], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[10], expr0),
msat_make_geq(menv, x_xs[10], expr1),
msat_make_geq(menv, x_xs[10], expr2),
msat_make_geq(menv, x_xs[10], expr3),
msat_make_geq(menv, x_xs[10], expr4),
msat_make_geq(menv, x_xs[10], expr5),
msat_make_geq(menv, x_xs[10], expr6),
msat_make_geq(menv, x_xs[10], expr7),
msat_make_geq(menv, x_xs[10], expr8),
msat_make_geq(menv, x_xs[10], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[10], expr0),
msat_make_equal(menv, x_xs[10], expr1),
msat_make_equal(menv, x_xs[10], expr2),
msat_make_equal(menv, x_xs[10], expr3),
msat_make_equal(menv, x_xs[10], expr4),
msat_make_equal(menv, x_xs[10], expr5),
msat_make_equal(menv, x_xs[10], expr6),
msat_make_equal(menv, x_xs[10], expr7),
msat_make_equal(menv, x_xs[10], expr8),
msat_make_equal(menv, x_xs[10], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_8_0)
expr1 = msat_make_plus(menv, xs[4], n_4_0)
expr2 = msat_make_plus(menv, xs[7], n_2_0)
expr3 = msat_make_plus(menv, xs[8], n_12_0)
expr4 = msat_make_plus(menv, xs[10], n_17_0)
expr5 = msat_make_plus(menv, xs[11], n_17_0)
expr6 = msat_make_plus(menv, xs[12], n_19_0)
expr7 = msat_make_plus(menv, xs[15], n_9_0)
expr8 = msat_make_plus(menv, xs[18], n_20_0)
expr9 = msat_make_plus(menv, xs[19], n_11_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[11], expr0),
msat_make_geq(menv, x_xs[11], expr1),
msat_make_geq(menv, x_xs[11], expr2),
msat_make_geq(menv, x_xs[11], expr3),
msat_make_geq(menv, x_xs[11], expr4),
msat_make_geq(menv, x_xs[11], expr5),
msat_make_geq(menv, x_xs[11], expr6),
msat_make_geq(menv, x_xs[11], expr7),
msat_make_geq(menv, x_xs[11], expr8),
msat_make_geq(menv, x_xs[11], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[11], expr0),
msat_make_equal(menv, x_xs[11], expr1),
msat_make_equal(menv, x_xs[11], expr2),
msat_make_equal(menv, x_xs[11], expr3),
msat_make_equal(menv, x_xs[11], expr4),
msat_make_equal(menv, x_xs[11], expr5),
msat_make_equal(menv, x_xs[11], expr6),
msat_make_equal(menv, x_xs[11], expr7),
msat_make_equal(menv, x_xs[11], expr8),
msat_make_equal(menv, x_xs[11], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_20_0)
expr1 = msat_make_plus(menv, xs[5], n_1_0)
expr2 = msat_make_plus(menv, xs[6], n_18_0)
expr3 = msat_make_plus(menv, xs[7], n_14_0)
expr4 = msat_make_plus(menv, xs[8], n_13_0)
expr5 = msat_make_plus(menv, xs[10], n_17_0)
expr6 = msat_make_plus(menv, xs[11], n_9_0)
expr7 = msat_make_plus(menv, xs[12], n_8_0)
expr8 = msat_make_plus(menv, xs[13], n_14_0)
expr9 = msat_make_plus(menv, xs[18], n_12_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[12], expr0),
msat_make_geq(menv, x_xs[12], expr1),
msat_make_geq(menv, x_xs[12], expr2),
msat_make_geq(menv, x_xs[12], expr3),
msat_make_geq(menv, x_xs[12], expr4),
msat_make_geq(menv, x_xs[12], expr5),
msat_make_geq(menv, x_xs[12], expr6),
msat_make_geq(menv, x_xs[12], expr7),
msat_make_geq(menv, x_xs[12], expr8),
msat_make_geq(menv, x_xs[12], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[12], expr0),
msat_make_equal(menv, x_xs[12], expr1),
msat_make_equal(menv, x_xs[12], expr2),
msat_make_equal(menv, x_xs[12], expr3),
msat_make_equal(menv, x_xs[12], expr4),
msat_make_equal(menv, x_xs[12], expr5),
msat_make_equal(menv, x_xs[12], expr6),
msat_make_equal(menv, x_xs[12], expr7),
msat_make_equal(menv, x_xs[12], expr8),
msat_make_equal(menv, x_xs[12], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_10_0)
expr1 = msat_make_plus(menv, xs[1], n_15_0)
expr2 = msat_make_plus(menv, xs[2], n_4_0)
expr3 = msat_make_plus(menv, xs[7], n_13_0)
expr4 = msat_make_plus(menv, xs[10], n_15_0)
expr5 = msat_make_plus(menv, xs[12], n_17_0)
expr6 = msat_make_plus(menv, xs[13], n_19_0)
expr7 = msat_make_plus(menv, xs[14], n_7_0)
expr8 = msat_make_plus(menv, xs[15], n_3_0)
expr9 = msat_make_plus(menv, xs[18], n_15_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[13], expr0),
msat_make_geq(menv, x_xs[13], expr1),
msat_make_geq(menv, x_xs[13], expr2),
msat_make_geq(menv, x_xs[13], expr3),
msat_make_geq(menv, x_xs[13], expr4),
msat_make_geq(menv, x_xs[13], expr5),
msat_make_geq(menv, x_xs[13], expr6),
msat_make_geq(menv, x_xs[13], expr7),
msat_make_geq(menv, x_xs[13], expr8),
msat_make_geq(menv, x_xs[13], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[13], expr0),
msat_make_equal(menv, x_xs[13], expr1),
msat_make_equal(menv, x_xs[13], expr2),
msat_make_equal(menv, x_xs[13], expr3),
msat_make_equal(menv, x_xs[13], expr4),
msat_make_equal(menv, x_xs[13], expr5),
msat_make_equal(menv, x_xs[13], expr6),
msat_make_equal(menv, x_xs[13], expr7),
msat_make_equal(menv, x_xs[13], expr8),
msat_make_equal(menv, x_xs[13], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_1_0)
expr1 = msat_make_plus(menv, xs[1], n_1_0)
expr2 = msat_make_plus(menv, xs[4], n_16_0)
expr3 = msat_make_plus(menv, xs[8], n_20_0)
expr4 = msat_make_plus(menv, xs[9], n_12_0)
expr5 = msat_make_plus(menv, xs[10], n_9_0)
expr6 = msat_make_plus(menv, xs[11], n_15_0)
expr7 = msat_make_plus(menv, xs[14], n_11_0)
expr8 = msat_make_plus(menv, xs[18], n_9_0)
expr9 = msat_make_plus(menv, xs[19], n_7_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[14], expr0),
msat_make_geq(menv, x_xs[14], expr1),
msat_make_geq(menv, x_xs[14], expr2),
msat_make_geq(menv, x_xs[14], expr3),
msat_make_geq(menv, x_xs[14], expr4),
msat_make_geq(menv, x_xs[14], expr5),
msat_make_geq(menv, x_xs[14], expr6),
msat_make_geq(menv, x_xs[14], expr7),
msat_make_geq(menv, x_xs[14], expr8),
msat_make_geq(menv, x_xs[14], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[14], expr0),
msat_make_equal(menv, x_xs[14], expr1),
msat_make_equal(menv, x_xs[14], expr2),
msat_make_equal(menv, x_xs[14], expr3),
msat_make_equal(menv, x_xs[14], expr4),
msat_make_equal(menv, x_xs[14], expr5),
msat_make_equal(menv, x_xs[14], expr6),
msat_make_equal(menv, x_xs[14], expr7),
msat_make_equal(menv, x_xs[14], expr8),
msat_make_equal(menv, x_xs[14], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[1], n_17_0)
expr1 = msat_make_plus(menv, xs[3], n_18_0)
expr2 = msat_make_plus(menv, xs[6], n_18_0)
expr3 = msat_make_plus(menv, xs[9], n_5_0)
expr4 = msat_make_plus(menv, xs[10], n_16_0)
expr5 = msat_make_plus(menv, xs[13], n_5_0)
expr6 = msat_make_plus(menv, xs[14], n_14_0)
expr7 = msat_make_plus(menv, xs[17], n_10_0)
expr8 = msat_make_plus(menv, xs[18], n_13_0)
expr9 = msat_make_plus(menv, xs[19], n_9_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[15], expr0),
msat_make_geq(menv, x_xs[15], expr1),
msat_make_geq(menv, x_xs[15], expr2),
msat_make_geq(menv, x_xs[15], expr3),
msat_make_geq(menv, x_xs[15], expr4),
msat_make_geq(menv, x_xs[15], expr5),
msat_make_geq(menv, x_xs[15], expr6),
msat_make_geq(menv, x_xs[15], expr7),
msat_make_geq(menv, x_xs[15], expr8),
msat_make_geq(menv, x_xs[15], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[15], expr0),
msat_make_equal(menv, x_xs[15], expr1),
msat_make_equal(menv, x_xs[15], expr2),
msat_make_equal(menv, x_xs[15], expr3),
msat_make_equal(menv, x_xs[15], expr4),
msat_make_equal(menv, x_xs[15], expr5),
msat_make_equal(menv, x_xs[15], expr6),
msat_make_equal(menv, x_xs[15], expr7),
msat_make_equal(menv, x_xs[15], expr8),
msat_make_equal(menv, x_xs[15], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_14_0)
expr1 = msat_make_plus(menv, xs[1], n_2_0)
expr2 = msat_make_plus(menv, xs[4], n_3_0)
expr3 = msat_make_plus(menv, xs[5], n_18_0)
expr4 = msat_make_plus(menv, xs[6], n_8_0)
expr5 = msat_make_plus(menv, xs[9], n_17_0)
expr6 = msat_make_plus(menv, xs[12], n_17_0)
expr7 = msat_make_plus(menv, xs[13], n_2_0)
expr8 = msat_make_plus(menv, xs[15], n_4_0)
expr9 = msat_make_plus(menv, xs[17], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[16], expr0),
msat_make_geq(menv, x_xs[16], expr1),
msat_make_geq(menv, x_xs[16], expr2),
msat_make_geq(menv, x_xs[16], expr3),
msat_make_geq(menv, x_xs[16], expr4),
msat_make_geq(menv, x_xs[16], expr5),
msat_make_geq(menv, x_xs[16], expr6),
msat_make_geq(menv, x_xs[16], expr7),
msat_make_geq(menv, x_xs[16], expr8),
msat_make_geq(menv, x_xs[16], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[16], expr0),
msat_make_equal(menv, x_xs[16], expr1),
msat_make_equal(menv, x_xs[16], expr2),
msat_make_equal(menv, x_xs[16], expr3),
msat_make_equal(menv, x_xs[16], expr4),
msat_make_equal(menv, x_xs[16], expr5),
msat_make_equal(menv, x_xs[16], expr6),
msat_make_equal(menv, x_xs[16], expr7),
msat_make_equal(menv, x_xs[16], expr8),
msat_make_equal(menv, x_xs[16], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_16_0)
expr1 = msat_make_plus(menv, xs[4], n_14_0)
expr2 = msat_make_plus(menv, xs[6], n_20_0)
expr3 = msat_make_plus(menv, xs[7], n_15_0)
expr4 = msat_make_plus(menv, xs[8], n_2_0)
expr5 = msat_make_plus(menv, xs[11], n_5_0)
expr6 = msat_make_plus(menv, xs[14], n_13_0)
expr7 = msat_make_plus(menv, xs[16], n_10_0)
expr8 = msat_make_plus(menv, xs[18], n_4_0)
expr9 = msat_make_plus(menv, xs[19], n_1_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[17], expr0),
msat_make_geq(menv, x_xs[17], expr1),
msat_make_geq(menv, x_xs[17], expr2),
msat_make_geq(menv, x_xs[17], expr3),
msat_make_geq(menv, x_xs[17], expr4),
msat_make_geq(menv, x_xs[17], expr5),
msat_make_geq(menv, x_xs[17], expr6),
msat_make_geq(menv, x_xs[17], expr7),
msat_make_geq(menv, x_xs[17], expr8),
msat_make_geq(menv, x_xs[17], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[17], expr0),
msat_make_equal(menv, x_xs[17], expr1),
msat_make_equal(menv, x_xs[17], expr2),
msat_make_equal(menv, x_xs[17], expr3),
msat_make_equal(menv, x_xs[17], expr4),
msat_make_equal(menv, x_xs[17], expr5),
msat_make_equal(menv, x_xs[17], expr6),
msat_make_equal(menv, x_xs[17], expr7),
msat_make_equal(menv, x_xs[17], expr8),
msat_make_equal(menv, x_xs[17], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_9_0)
expr1 = msat_make_plus(menv, xs[5], n_12_0)
expr2 = msat_make_plus(menv, xs[6], n_19_0)
expr3 = msat_make_plus(menv, xs[7], n_11_0)
expr4 = msat_make_plus(menv, xs[9], n_9_0)
expr5 = msat_make_plus(menv, xs[10], n_19_0)
expr6 = msat_make_plus(menv, xs[11], n_20_0)
expr7 = msat_make_plus(menv, xs[12], n_2_0)
expr8 = msat_make_plus(menv, xs[13], n_17_0)
expr9 = msat_make_plus(menv, xs[15], n_7_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[18], expr0),
msat_make_geq(menv, x_xs[18], expr1),
msat_make_geq(menv, x_xs[18], expr2),
msat_make_geq(menv, x_xs[18], expr3),
msat_make_geq(menv, x_xs[18], expr4),
msat_make_geq(menv, x_xs[18], expr5),
msat_make_geq(menv, x_xs[18], expr6),
msat_make_geq(menv, x_xs[18], expr7),
msat_make_geq(menv, x_xs[18], expr8),
msat_make_geq(menv, x_xs[18], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[18], expr0),
msat_make_equal(menv, x_xs[18], expr1),
msat_make_equal(menv, x_xs[18], expr2),
msat_make_equal(menv, x_xs[18], expr3),
msat_make_equal(menv, x_xs[18], expr4),
msat_make_equal(menv, x_xs[18], expr5),
msat_make_equal(menv, x_xs[18], expr6),
msat_make_equal(menv, x_xs[18], expr7),
msat_make_equal(menv, x_xs[18], expr8),
msat_make_equal(menv, x_xs[18], expr9),))
trans = msat_make_and(menv, trans, _t)
expr0 = msat_make_plus(menv, xs[0], n_6_0)
expr1 = msat_make_plus(menv, xs[1], n_18_0)
expr2 = msat_make_plus(menv, xs[3], n_2_0)
expr3 = msat_make_plus(menv, xs[7], n_4_0)
expr4 = msat_make_plus(menv, xs[9], n_1_0)
expr5 = msat_make_plus(menv, xs[10], n_2_0)
expr6 = msat_make_plus(menv, xs[14], n_11_0)
expr7 = msat_make_plus(menv, xs[16], n_2_0)
expr8 = msat_make_plus(menv, xs[17], n_16_0)
expr9 = msat_make_plus(menv, xs[19], n_5_0)
_t = msat_make_and(menv,
msat_make_geq(menv, x_xs[19], expr0),
msat_make_geq(menv, x_xs[19], expr1),
msat_make_geq(menv, x_xs[19], expr2),
msat_make_geq(menv, x_xs[19], expr3),
msat_make_geq(menv, x_xs[19], expr4),
msat_make_geq(menv, x_xs[19], expr5),
msat_make_geq(menv, x_xs[19], expr6),
msat_make_geq(menv, x_xs[19], expr7),
msat_make_geq(menv, x_xs[19], expr8),
msat_make_geq(menv, x_xs[19], expr9),)
_t = msat_make_and(menv, _t,
msat_make_or(menv,
msat_make_equal(menv, x_xs[19], expr0),
msat_make_equal(menv, x_xs[19], expr1),
msat_make_equal(menv, x_xs[19], expr2),
msat_make_equal(menv, x_xs[19], expr3),
msat_make_equal(menv, x_xs[19], expr4),
msat_make_equal(menv, x_xs[19], expr5),
msat_make_equal(menv, x_xs[19], expr6),
msat_make_equal(menv, x_xs[19], expr7),
msat_make_equal(menv, x_xs[19], expr8),
msat_make_equal(menv, x_xs[19], expr9),))
trans = msat_make_and(menv, trans, _t)
# ltl property: ((x_4 - x_10 > -8) & ((x_2 - x_12 > 17) U (x_3 - x_14 > 10)))
    ltl = msat_make_and(
        menv,
        msat_make_gt(menv, msat_make_minus(menv, xs[4], xs[10]),
                     msat_make_number(menv, "-8")),
        enc.make_U(
            msat_make_gt(menv, msat_make_minus(menv, xs[2], xs[12]),
                         msat_make_number(menv, "17")),
            msat_make_gt(menv, msat_make_minus(menv, xs[3], xs[14]),
                         msat_make_number(menv, "10"))))
    return TermMap(curr2next), init, trans, ltl
producer.py | from confluent_kafka import Producer
import socket
if __name__ == '__main__':
print("Starting Kafka Producer")
producer_config = {'client.id': socket.gethostname(),
'bootstrap.servers': 'localhost:9092'}
print("Creating Producer")
producer = Producer(producer_config)
print("Producing Kafka Message")
for i in range(1, 101):
        for j in range(1, 10001):
            producer.produce('hello-producer', key=str(j*i), value="Simple Message-" + str(j*i))
        producer.poll()
    producer.flush()
    print("Finished Kafka Producer")
median.go | package utils
import "sort"
// Median returns the median value of the supplied numbers
func Median(numbers []float64) float64 {
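	// sort a copy so the caller's slice is left untouched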
worknumbers := make([]float64, len(numbers))
copy(worknumbers, numbers)
	sort.Float64s(worknumbers)
index := len(worknumbers) / 2
if len(worknumbers)%2 != 0 {
return worknumbers[index]
}
return (worknumbers[index-1] + worknumbers[index]) / 2
}
models.py | from __future__ import print_function, absolute_import, division
import tensorflow as tf
from tensorflow.contrib import layers
mu = 1.0e-6
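# f_norm and l2_norm below are smoothed surrogates sqrt(||x||^2 + mu^2) - mu for
# the Frobenius and l2 norms, with hand-written gradients that stay finite at zero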
@tf.custom_gradient
def f_norm(x):
f2 = tf.square(tf.norm(x, ord='fro', axis=[-2, -1]))
f = tf.sqrt(f2 + mu ** 2) - mu
def grad(dy):
return dy * (x / tf.sqrt(f2 + mu ** 2))
return f, grad
@tf.custom_gradient
def l2_norm(x):
f2 = tf.square(tf.norm(x, ord=2))
f = tf.sqrt(f2 + mu ** 2) - mu
def grad(dy):
return dy * (x / tf.sqrt(f2 + mu ** 2))
return f, grad
class RSCConvAE:
'''
Duet Robust Deep Subspace Clustering
'''
def __init__(self, n_input, kernel_size, n_hidden, z_dim, lamda1=1.0,
lamda2=1.0, eta1=1.0, eta2=1.0, batch_size=200, reg=None,
denoise=False, save_path=None, restore_path=None,
                 normalize_input=False, logs_path='./logs'):
        self.n_input = n_input
        self.kernel_size = kernel_size
        self.n_hidden = n_hidden
        self.batch_size = batch_size
        self.z_dim = z_dim
        self.reg = reg
        self.save_path = save_path
        self.restore_path = restore_path
        self.iter = 0
        # input required to be fed
        self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
        self.learning_rate = tf.placeholder(tf.float32, [])
        weights = self._initialize_weights()
        self.x_noise = weights['x_noise']
        self.z_noise = weights['z_noise']
        self.z, self.Coef, self.x_r, self.x_diff, self.z_diff = \
            self._forward(denoise, normalize_input, weights)
        # l_2 reconstruction loss
        self.reconst_cost = self._get_reconstruction_loss(eta1)
        tf.summary.scalar("recons_loss", self.reconst_cost)
        self.reg_loss = self._get_coef_reg_loss(reg_type='l2')  # l2 reg
        tf.summary.scalar("reg_loss", lamda2 * self.reg_loss)
        selfexpress_cost = tf.square(self.z_diff - self.z_noise)
        z_noise_reg = tf.map_fn(lambda frame: l2_norm(frame), self.z_noise)
        self.selfexpress_loss = 0.5 * \
            tf.reduce_sum(selfexpress_cost) + eta2 * tf.reduce_sum(z_noise_reg)
        tf.summary.scalar("selfexpress_loss", lamda1 *
                          self.selfexpress_loss)
        self.loss = self.reconst_cost + lamda1 * \
            self.selfexpress_loss + lamda2 * self.reg_loss
        self.merged_summary_op = tf.summary.merge_all()
        self.optimizer = tf.train.AdamOptimizer(
            # self.optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate).minimize(self.loss)
        self.init = tf.global_variables_initializer()
        self.sess = tf.InteractiveSession()
        self.sess.run(self.init)
        self.saver = tf.train.Saver(
            [v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))])
        self.summary_writer = tf.summary.FileWriter(
            logs_path, graph=tf.get_default_graph())

def _build_input(self, denoise, normalize_input):
if not normalize_input:
x_input = self.x
else:
x_input = tf.map_fn(
lambda frame: tf.image.per_image_standardization(frame), self.x)
if denoise:
x_input = tf.add(self.x, tf.random_normal(shape=tf.shape(self.x),
mean=0,
stddev=0.2,
dtype=tf.float32))
return x_input
def _forward(self, denoise, normalize_input, weights):
x_input = self._build_input(denoise, normalize_input)
latent, shape = self.encoder(x_input, weights)
z = tf.reshape(latent, [self.batch_size, -1])
Coef = weights['Coef']
Coef = Coef - tf.diag(tf.diag_part(Coef))
z_c = tf.matmul(Coef, z)
latent_c = tf.reshape(z_c, tf.shape(latent))
x_r = self.decoder(latent_c, weights, shape)
z_diff = z - z_c
x_diff = x_input - x_r
return z, Coef, x_r, x_diff, z_diff
def _get_reconstruction_loss(self, eta1):
reconst_cost = tf.square(self.x_diff - self.x_noise) # l2
x_noise_3dim = tf.squeeze(self.x_noise)
x_noise_group_reg = tf.map_fn(
lambda frame: f_norm(frame), x_noise_3dim)
reconst_cost = 0.5 * tf.reduce_sum(reconst_cost) + \
eta1 * tf.reduce_sum(x_noise_group_reg)
return reconst_cost
def _get_coef_reg_loss(self, reg_type='l2'):
        if reg_type == 'l2':
            loss = tf.reduce_sum(tf.square(self.Coef))
        elif reg_type == 'l1':
loss = tf.reduce_sum(tf.abs(self.Coef))
return loss
def _initialize_weights(self):
all_weights = dict()
n_layers = len(self.n_hidden)
# all_weights['Coef'] = tf.Variable(
# tf.random_normal([self.batch_size, self.batch_size],
# mean=0.0, stddev=0.1, dtype=tf.float32,
# seed=None), name='Coef')
all_weights['Coef'] = tf.Variable(
0 * tf.ones([self.batch_size, self.batch_size], tf.float32), name='Coef')
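        # note: x_noise and z_noise below are also named 'Coef', so the saver
        # created in __init__ (which skips names starting with "Coef") ignores them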
all_weights['x_noise'] = tf.Variable(
tf.zeros([self.batch_size, self.n_input[0],
self.n_input[1], 1], tf.float32), name='Coef')
all_weights['z_noise'] = tf.Variable(
tf.zeros([self.batch_size, self.z_dim], tf.float32), name='Coef')
all_weights['enc_w0'] = tf.get_variable("enc_w0", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
all_weights['enc_b0'] = tf.Variable(
tf.zeros([self.n_hidden[0]], dtype=tf.float32))
for iter_i in range(1, n_layers):
enc_name_wi = 'enc_w' + str(iter_i)
all_weights[enc_name_wi] = tf.get_variable(enc_name_wi, shape=[self.kernel_size[iter_i], self.kernel_size[iter_i], self.n_hidden[iter_i - 1],
self.n_hidden[iter_i]], initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
enc_name_bi = 'enc_b' + str(iter_i)
all_weights[enc_name_bi] = tf.Variable(
tf.zeros([self.n_hidden[iter_i]], dtype=tf.float32))
for iter_i in range(1, n_layers):
dec_name_wi = 'dec_w' + str(iter_i - 1)
all_weights[dec_name_wi] = tf.get_variable(dec_name_wi, shape=[self.kernel_size[n_layers - iter_i], self.kernel_size[n_layers - iter_i],
self.n_hidden[n_layers - iter_i - 1], self.n_hidden[n_layers - iter_i]],
initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
dec_name_bi = 'dec_b' + str(iter_i - 1)
all_weights[dec_name_bi] = tf.Variable(tf.zeros(
[self.n_hidden[n_layers - iter_i - 1]], dtype=tf.float32))
dec_name_wi = 'dec_w' + str(n_layers - 1)
all_weights[dec_name_wi] = tf.get_variable(dec_name_wi, shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
dec_name_bi = 'dec_b' + str(n_layers - 1)
all_weights[dec_name_bi] = tf.Variable(
tf.zeros([1], dtype=tf.float32))
return all_weights
# Building the encoder
def encoder(self, x, weights):
shapes = []
shapes.append(x.get_shape().as_list())
layeri = tf.nn.bias_add(tf.nn.conv2d(x, weights['enc_w0'], strides=[
1, 2, 2, 1], padding='SAME'), weights['enc_b0'])
layeri = tf.nn.relu(layeri)
shapes.append(layeri.get_shape().as_list())
for iter_i in range(1, len(self.n_hidden)):
layeri = tf.nn.bias_add(tf.nn.conv2d(layeri, weights['enc_w' + str(iter_i)], strides=[
1, 2, 2, 1], padding='SAME'), weights['enc_b' + str(iter_i)])
layeri = tf.nn.relu(layeri)
shapes.append(layeri.get_shape().as_list())
layer3 = layeri
return layer3, shapes
# Building the decoder
def decoder(self, z, weights, shapes):
n_layers = len(self.n_hidden)
layer3 = z
for iter_i in range(n_layers):
shape_de = shapes[n_layers - iter_i - 1]
layer3 = tf.add(tf.nn.conv2d_transpose(layer3, weights['dec_w' + str(iter_i)], tf.stack([tf.shape(self.x)[0], shape_de[1], shape_de[2], shape_de[3]]),
strides=[1, 2, 2, 1], padding='SAME'), weights['dec_b' + str(iter_i)])
layer3 = tf.nn.relu(layer3)
return layer3
def partial_fit(self, X, lr):
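        # run one optimizer step and fetch the loss, Coef and residuals for monitoring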
cost, summary, _, Coef, z_diff, x_diff = self.sess.run(
(self.loss, self.merged_summary_op, self.optimizer, self.Coef,
self.z_diff, self.x_diff),
feed_dict={self.x: X, self.learning_rate: lr})
self.summary_writer.add_summary(summary, self.iter)
self.iter = self.iter + 1
return cost, Coef, z_diff, x_diff
def initlization(self):
self.sess.run(self.init)
def reconstruct(self, X):
return self.sess.run(self.x_r, feed_dict={self.x: X})
def transform(self, X):
return self.sess.run(self.z, feed_dict={self.x: X})
def save_model(self):
save_path = self.saver.save(self.sess, self.save_path)
print("model saved in file: %s" % save_path)
def restore(self):
self.saver.restore(self.sess, self.restore_path)
print("model restored")
| self.n_input = n_input
self.kernel_size = kernel_size
self.n_hidden = n_hidden
self.batch_size = batch_size
self.z_dim = z_dim
self.reg = reg
self.save_path = save_path
self.restore_path = restore_path
self.iter = 0
# input required to be fed
self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
self.learning_rate = tf.placeholder(tf.float32, [])
weights = self._initialize_weights()
self.x_noise = weights['x_noise']
self.z_noise = weights['z_noise']
self.z, self.Coef, self.x_r, self.x_diff, self.z_diff = \
self._forward(denoise, normalize_input, weights)
# l_2 reconstruction loss
self.reconst_cost = self._get_reconstruction_loss(eta1)
tf.summary.scalar("recons_loss", self.reconst_cost)
self.reg_loss = self._get_coef_reg_loss(reg_type='l2') # l2 reg
tf.summary.scalar("reg_loss", lamda2 * self.reg_loss)
selfexpress_cost = tf.square(self.z_diff - self.z_noise)
z_noise_reg = tf.map_fn(lambda frame: l2_norm(frame), self.z_noise)
self.selfexpress_loss = 0.5 * \
tf.reduce_sum(selfexpress_cost) + eta2 * tf.reduce_sum(z_noise_reg)
tf.summary.scalar("selfexpress_loss", lamda1 *
self.selfexpress_loss)
self.loss = self.reconst_cost + lamda1 * \
self.selfexpress_loss + lamda2 * self.reg_loss
self.merged_summary_op = tf.summary.merge_all()
self.optimizer = tf.train.AdamOptimizer(
# self.optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.learning_rate).minimize(self.loss)
self.init = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init)
self.saver = tf.train.Saver(
[v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))])
self.summary_writer = tf.summary.FileWriter(
logs_path, graph=tf.get_default_graph()) |
chain_spec.rs | use std::str::FromStr;
use sp_core::{Pair, Public, sr25519, ed25519, OpaquePeerId};
use logion_node_runtime::{
opaque::SessionKeys,
AccountId,
AuraConfig,
Balance,
BalancesConfig,
GenesisConfig,
GrandpaConfig,
LoAuthorityListConfig,
NodeAuthorizationConfig,
Signature,
SessionConfig,
SudoConfig,
SystemConfig,
ValidatorSetConfig,
WASM_BINARY
};
use sp_consensus_aura::sr25519::AuthorityId as AuraId;
use sp_finality_grandpa::AuthorityId as GrandpaId;
use sp_runtime::traits::{Verify, IdentifyAccount};
use sc_service::ChainType;
use serde_json::json;
// The URL for the telemetry server.
// const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/";
/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type.
pub type ChainSpec = sc_service::GenericChainSpec<GenesisConfig>;
/// Generate a crypto pair from seed.
pub fn get_from_seed<TPublic: Public>(seed: &str) -> <TPublic::Pair as Pair>::Public {
TPublic::Pair::from_string(&format!("//{}", seed), None)
.expect("static values are valid; qed")
.public()
}
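/// Build a validator's session keys from its Aura and GRANDPA public keys.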
fn session_keys(aura: AuraId, grandpa: GrandpaId) -> SessionKeys {
SessionKeys { aura, grandpa }
}
type AccountPublic = <Signature as Verify>::Signer;
/// Generate an account ID from seed.
pub fn get_account_id_from_seed<TPublic: Public>(seed: &str) -> AccountId where
AccountPublic: From<<TPublic::Pair as Pair>::Public>
{
AccountPublic::from(get_from_seed::<TPublic>(seed)).into_account()
}
/// Generate an Aura authority key.
pub fn authority_keys_from_seed(s: &str) -> (AccountId, AuraId, GrandpaId) {
(
get_account_id_from_seed::<sr25519::Public>(s),
get_from_seed::<AuraId>(s),
get_from_seed::<GrandpaId>(s),
)
}
pub fn development_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
Ok(ChainSpec::from_genesis(
// Name
"Logion Development",
// ID
"logion_dev",
ChainType::Development,
move || logion_genesis(
wasm_binary,
// Initial PoA authorities
vec![
authority_keys_from_seed("Alice"),
],
// Sudo account
get_account_id_from_seed::<sr25519::Public>("Alice"),
// Pre-funded accounts
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
],
// Initial authorized nodes
vec![
(
OpaquePeerId(bs58::decode("12D3KooWBmAwcd4PJNJvfV89HwE48nwkRmAgo8Vy3uQEyNNHBox2").into_vec().unwrap()),
get_account_id_from_seed::<sr25519::Public>("Alice")
),
],
vec![ // Initial set of Logion Legal Officers
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
],
),
// Bootnodes
vec![],
// Telemetry
None,
// Protocol ID
None,
// Properties
Some(default_properties()),
// Extensions
None,
))
}
pub fn local_testnet_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
Ok(ChainSpec::from_genesis(
// Name
"Logion Local Testnet",
// ID
"logion_local_testnet",
ChainType::Local,
move || logion_genesis(
wasm_binary,
// Initial PoA authorities
vec![
authority_keys_from_seed("Alice"),
authority_keys_from_seed("Bob"),
authority_keys_from_seed("Charlie"),
],
// Sudo account
get_account_id_from_seed::<sr25519::Public>("Alice"),
// Pre-funded accounts
vec![
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
get_account_id_from_seed::<sr25519::Public>("Dave"),
get_account_id_from_seed::<sr25519::Public>("Eve"),
get_account_id_from_seed::<sr25519::Public>("Ferdie"),
get_account_id_from_seed::<sr25519::Public>("Alice//stash"),
get_account_id_from_seed::<sr25519::Public>("Bob//stash"),
get_account_id_from_seed::<sr25519::Public>("Charlie//stash"),
get_account_id_from_seed::<sr25519::Public>("Dave//stash"),
get_account_id_from_seed::<sr25519::Public>("Eve//stash"),
get_account_id_from_seed::<sr25519::Public>("Ferdie//stash"),
],
// Initial authorized nodes
vec![
(
OpaquePeerId(bs58::decode("12D3KooWBmAwcd4PJNJvfV89HwE48nwkRmAgo8Vy3uQEyNNHBox2").into_vec().unwrap()),
get_account_id_from_seed::<sr25519::Public>("Alice")
),
(
OpaquePeerId(bs58::decode("12D3KooWQYV9dGMFoRzNStwpXztXaBUjtPqi6aU76ZgUriHhKust").into_vec().unwrap()),
get_account_id_from_seed::<sr25519::Public>("Bob")
),
(
OpaquePeerId(bs58::decode("12D3KooWJvyP3VJYymTqG7eH4PM5rN4T2agk5cdNCfNymAqwqcvZ").into_vec().unwrap()),
get_account_id_from_seed::<sr25519::Public>("Charlie")
),
],
vec![ // Initial set of Logion Legal Officers
get_account_id_from_seed::<sr25519::Public>("Alice"),
get_account_id_from_seed::<sr25519::Public>("Bob"),
get_account_id_from_seed::<sr25519::Public>("Charlie"),
],
),
// Bootnodes
vec![],
// Telemetry
None,
// Protocol ID
None,
// Properties
Some(default_properties()),
// Extensions
None,
))
}
pub fn mvp_config() -> Result<ChainSpec, String> {
let wasm_binary = WASM_BINARY.ok_or_else(|| "Development wasm not available".to_string())?;
const ROOT_PUBLIC_SR25519: &str = "5FUg3QWfipPf8yKv5hMK6wQf8nn6og9BbRNcr3Y8CwUJwTh9";
const NODE1_PUBLIC_SR25519: &str = "5DjzFDhFidvGCuuy6i8Lsi4XyruYjxTTkJKb1o7XzVdMNPVb";
const NODE1_PUBLIC_ED25519: &str = "5EVSLLEFUhrWtb5n7tC7ud91nT1qFodhYkAkxdbNpJznqTZ5";
const NODE1_PEER_ID: &str = "12D3KooWPPCrBT2WxxPuBmdMFRs1JddaZjTPWvNdgRzWoFzZw2yT";
const NODE2_PUBLIC_SR25519: &str = "5DoD9n61SssFiWQDTD7bz1eX3KCxZJ6trVj2GsDwMi2PqP85";
const NODE2_PUBLIC_ED25519: &str = "5CUJgAjKLb64bHFFbLu5hQzgR28zH6apcymSDLV1RBFujVjW";
const NODE2_PEER_ID: &str = "12D3KooWSweFqPDamxmzjpgX7Q4bvfnpRKzTJ1igsYLU2ZsLL1TM";
const NODE3_PUBLIC_SR25519: &str = "5CJTSSJ4v1RAauZpeqTeddyui4wESZZqPor33wum9aKuQXZC";
const NODE3_PUBLIC_ED25519: &str = "5FuUhqoi1BhAf92K5DnKPUFDrYNDX4JUAQKgT3AvCNewjpTw";
const NODE3_PEER_ID: &str = "12D3KooWJSnG148nKuds3cEjYrjFMPNWh6biVBPxuppgQnn1owZC";
Ok(ChainSpec::from_genesis(
// Name
"Logion MVP",
// ID
"logion_mvp",
ChainType::Live,
move || logion_genesis(
wasm_binary,
// Initial PoA authorities
vec![
(
AccountId::from_str(NODE1_PUBLIC_SR25519).unwrap(),
AuraId::from(sr25519::Public::from_str(NODE1_PUBLIC_SR25519).unwrap()),
GrandpaId::from(ed25519::Public::from_str(NODE1_PUBLIC_ED25519).unwrap()),
),
(
AccountId::from_str(NODE2_PUBLIC_SR25519).unwrap(),
AuraId::from(sr25519::Public::from_str(NODE2_PUBLIC_SR25519).unwrap()),
GrandpaId::from(ed25519::Public::from_str(NODE2_PUBLIC_ED25519).unwrap()),
)
,
(
AccountId::from_str(NODE3_PUBLIC_SR25519).unwrap(),
AuraId::from(sr25519::Public::from_str(NODE3_PUBLIC_SR25519).unwrap()),
GrandpaId::from(ed25519::Public::from_str(NODE3_PUBLIC_ED25519).unwrap()),
)
],
// Sudo account
AccountId::from_str(ROOT_PUBLIC_SR25519).unwrap(),
// Pre-funded accounts
vec![
AccountId::from_str(ROOT_PUBLIC_SR25519).unwrap(),
AccountId::from_str(NODE1_PUBLIC_SR25519).unwrap(),
AccountId::from_str(NODE2_PUBLIC_SR25519).unwrap(),
AccountId::from_str(NODE3_PUBLIC_SR25519).unwrap(),
],
// Initial authorized nodes
vec![
(
OpaquePeerId(bs58::decode(NODE1_PEER_ID).into_vec().unwrap()),
AccountId::from_str(NODE1_PUBLIC_SR25519).unwrap()
),
(
OpaquePeerId(bs58::decode(NODE2_PEER_ID).into_vec().unwrap()),
AccountId::from_str(NODE2_PUBLIC_SR25519).unwrap()
),
(
OpaquePeerId(bs58::decode(NODE3_PEER_ID).into_vec().unwrap()),
AccountId::from_str(NODE3_PUBLIC_SR25519).unwrap()
)
],
vec![ // Initial set of Logion Legal Officers
],
),
// Bootnodes
vec![],
// Telemetry
None,
// Protocol ID
None,
// Properties
Some(default_properties()),
// Extensions
None,
))
}
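/// 100,000 LGNT per endowed account (the token uses 18 decimals, see default_properties).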
const INITIAL_BALANCE: Balance = 100_000_000_000_000_000_000_000;
/// Configure initial storage state for FRAME modules.
fn logion_genesis(
wasm_binary: &[u8],
initial_authorities: Vec<(AccountId, AuraId, GrandpaId)>,
root_key: AccountId,
endowed_accounts: Vec<AccountId>,
initial_authorized_nodes: Vec<(OpaquePeerId, AccountId)>,
legal_officers: Vec<AccountId>,
) -> GenesisConfig {
GenesisConfig {
frame_system: Some(SystemConfig {
// Add Wasm runtime to storage.
code: wasm_binary.to_vec(),
changes_trie_config: Default::default(),
}),
pallet_balances: Some(BalancesConfig {
// Configure endowed accounts with initial balance.
balances: endowed_accounts.iter().cloned().map(|k|(k, INITIAL_BALANCE)).collect(),
}),
pallet_validator_set: Some(ValidatorSetConfig {
validators: initial_authorities.iter().map(|x| x.0.clone()).collect::<Vec<_>>(),
}),
pallet_session: Some(SessionConfig {
keys: initial_authorities
.iter()
.map(|x| (x.0.clone(), x.0.clone(), session_keys(x.1.clone(), x.2.clone())))
.collect::<Vec<_>>(),
}),
pallet_aura: Some(AuraConfig {
authorities: vec![],
}),
pallet_grandpa: Some(GrandpaConfig {
authorities: vec![],
}),
pallet_sudo: Some(SudoConfig {
// Assign network admin rights.
key: root_key,
}),
pallet_node_authorization: Some(NodeAuthorizationConfig {
nodes: initial_authorized_nodes.iter().map(|x| (x.0.clone(), x.1.clone())).collect(),
}),
pallet_lo_authority_list: Some(LoAuthorityListConfig {
legal_officers: legal_officers.iter().map(|x| x.clone()).collect(),
})
}
}
fn default_properties() -> sc_service::Properties {
let mut props : sc_service::Properties = sc_service::Properties::new();
props.insert("tokenSymbol".to_string(), json!("LGNT"));
props.insert("tokenDecimals".to_string(), json!(18));
return props;
}
test_cli20_routertype.py | # Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import mock
from neutronclient.common import exceptions
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
from networking_cisco.neutronclient import routertype
class CLITestV20RouterType(test_cli20.CLITestV20Base):
def setUp(self):
# need to mock before super because extensions loaded on instantiation
self._mock_extension_loading()
super(CLITestV20RouterType, self).setUp()
self.non_admin_status_resources.append('routertype')
def _create_patch(self, name, func=None):
patcher = mock.patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def _mock_extension_loading(self):
ext_pkg = 'neutronclient.common.extension'
contrib = self._create_patch(ext_pkg + '._discover_via_entry_points')
contrib.return_value = [("routertype", routertype)]
return contrib
def test_ext_cmd_loaded(self):
shell.NeutronShell('2.0')
ext_cmd = {'cisco-router-type-list': routertype.RouterTypeList,
'cisco-router-type-create': routertype.RouterTypeCreate,
'cisco-router-type-update': routertype.RouterTypeUpdate,
'cisco-router-type-delete': routertype.RouterTypeDelete,
'cisco-router-type-show': routertype.RouterTypeShow}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
def test_ext_cmd_help_doc_with_extension_name(self):
shell.NeutronShell('2.0')
ext_cmd = {'cisco-router-type-list': routertype.RouterTypeList,
'cisco-router-type-create': routertype.RouterTypeCreate,
'cisco-router-type-update': routertype.RouterTypeUpdate,
'cisco-router-type-delete': routertype.RouterTypeDelete,
'cisco-router-type-show': routertype.RouterTypeShow}
self.assertDictContainsSubset(ext_cmd, shell.COMMANDS['2.0'])
for item in ext_cmd:
cmdcls = shell.COMMANDS['2.0'].get(item)
self.assertTrue(cmdcls.__doc__.startswith("[routertype]"))
def test_create_router_type(self):
"""Create router type."""
resource = 'routertype'
cmd = routertype.RouterTypeCreate(test_cli20.MyApp(sys.stdout), None)
template = 'Template 1'
scheduler = 'my.scheduler:class_name'
plugin_driver = 'my.plugin.driver:class_name'
svc_helper = 'my.service.helper:class_name'
agent_driver = 'my.agent.driver:class_name'
myid = 'myid'
args = [template, scheduler, plugin_driver, svc_helper, agent_driver]
position_names = ['template_id', 'scheduler', 'driver',
'cfg_agent_service_helper', 'cfg_agent_driver']
position_values = [template, scheduler, plugin_driver, svc_helper,
agent_driver]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values)
def test_create_router_type_id(self):
"""Create router type: --id this_id myname."""
resource = 'routertype'
cmd = routertype.RouterTypeCreate(test_cli20.MyApp(sys.stdout), None)
template = 'Template 1'
scheduler = 'my.scheduler:class_name'
plugin_driver = 'my.plugin.driver:class_name'
svc_helper = 'my.service.helper:class_name'
agent_driver = 'my.agent.driver:class_name'
myid = 'myid'
args = ['--id', myid, template, scheduler, plugin_driver,
svc_helper, agent_driver]
position_names = ['template_id', 'scheduler', 'driver',
'cfg_agent_service_helper', 'cfg_agent_driver']
position_values = [template, scheduler, plugin_driver, svc_helper,
agent_driver]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values,
id=myid)
def test_create_router_type_tenant(self):
"""Create router type: --tenant_id tenantid myname."""
resource = 'routertype'
cmd = routertype.RouterTypeCreate(test_cli20.MyApp(sys.stdout), None)
template = 'Template 1'
scheduler = 'my.scheduler:class_name'
plugin_driver = 'my.plugin.driver:class_name'
svc_helper = 'my.service.helper:class_name'
agent_driver = 'my.agent.driver:class_name'
myid = 'myid'
args = ['--tenant_id', 'tenantid', template, scheduler, plugin_driver,
svc_helper, agent_driver]
position_names = ['template_id', 'scheduler', 'driver',
'cfg_agent_service_helper', 'cfg_agent_driver']
position_values = [template, scheduler, plugin_driver, svc_helper,
agent_driver]
self._test_create_resource(resource, cmd, None, myid, args,
position_names, position_values,
tenant_id='tenantid')
def _test_create_router_type_optional_args(
self, name=None, desc=None, ha_enabled=None, haenabled=None,
unshared=None, slot_need=None, slotneed=None):
resource = 'routertype'
cmd = routertype.RouterTypeCreate(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
template = 'Template 1'
scheduler = 'my.scheduler:class_name'
plugin_driver = 'my.plugin.driver:class_name'
svc_helper = 'my.service.helper:class_name'
agent_driver = 'my.agent.driver:class_name'
args = []
expected = {}
if desc is not None:
args += ['--description', desc]
expected['description'] = desc
if ha_enabled is not None:
args += ['--ha_enabled']
expected['ha_enabled_by_default'] = True
if haenabled is not None:
args += ['--ha-enabled']
expected['ha_enabled_by_default'] = True
if unshared is not None:
args += ['--unshared']
expected['shared'] = False
if slot_need is not None:
args += ['--slot_need', slot_need]
expected['slot_need'] = slot_need
if slotneed is not None:
args += ['--slot-need', slotneed]
expected['slot_need'] = slotneed
position_names = ['template_id', 'scheduler', 'driver',
'cfg_agent_service_helper', 'cfg_agent_driver']
position_values = [template, scheduler, plugin_driver, svc_helper,
agent_driver]
for p_v in position_values:
args.append(p_v)
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
**expected)
def test_create_router_type_name(self):
self._test_create_router_type_optional_args('some name')
def test_create_router_type_description(self):
self._test_create_router_type_optional_args(desc='some description')
def test_create_router_type_ha(self):
self._test_create_router_type_optional_args(ha_enabled=True)
self._test_create_router_type_optional_args(haenabled=True)
def test_create_router_type_unshared(self):
self._test_create_router_type_optional_args(unshared=False)
def test_create_router_type_slots(self):
self._test_create_router_type_optional_args(slot_need='5')
self._test_create_router_type_optional_args(slotneed='5')
def test_create_router_type_full(self):
self._test_create_router_type_optional_args(
'some name', desc='some description', ha_enabled=True,
unshared=False, slot_need='5')
self._test_create_router_type_optional_args(
'some name', desc='some description', haenabled=True,
unshared=False, slotneed='5')
def test_list_router_types_detail(self):
"""list routers: -D."""
resources = "routertypes"
cmd = routertype.RouterTypeList(test_cli20.MyApp(sys.stdout), None)
response_contents = [{
self.id_field: 'myid1', 'name': 'router_type_1_name',
'description': 'fast router', 'template_id': 'templ_id_1'}, {
self.id_field: 'myid2', 'name': 'router_type_2_name',
'description': 'faster router', 'template_id': 'templ_id_2'}]
self._test_list_resources(resources, cmd, True,
response_contents=response_contents)
def test_list_router_types_sort(self):
"""list routertypes: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "routertypes"
cmd = routertype.RouterTypeList(test_cli20.MyApp(sys.stdout), None)
response_contents = [{
self.id_field: 'myid1', 'name': 'router_type_1_name',
'description': 'fast router', 'template_id': 'templ_id_1'}, {
self.id_field: 'myid2', 'name': 'router_type_2_name',
'description': 'faster router', 'template_id': 'templ_id_2'}]
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"],
response_contents=response_contents)
def test_list_router_types_limit(self):
"""list routertypes: -P."""
resources = "routertypes"
cmd = routertype.RouterTypeList(test_cli20.MyApp(sys.stdout), None)
response_contents = [{
self.id_field: 'myid1', 'name': 'router_type_1_name',
'description': 'fast router', 'template_id': 'templ_id_1'}, {
self.id_field: 'myid2', 'name': 'router_type_2_name',
'description': 'faster router', 'template_id': 'templ_id_2'}]
self._test_list_resources(resources, cmd, page_size=1000,
response_contents=response_contents)
def test_update_router_type_exception(self):
"""Update routertype: myid."""
resource = 'routertype'
cmd = routertype.RouterTypeUpdate(test_cli20.MyApp(sys.stdout), None)
self.assertRaises(exceptions.CommandError, self._test_update_resource,
resource, cmd, 'myid', ['myid'], {})
    def _test_update_router_type(
self, name=None, desc=None, ha_enabled=None, haenabled=None,
ha_disabled=None, hadisabled=None, unshared=None, shared=None,
slot_need=None, slotneed=None):
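        # build the CLI args and the expected request body from whichever
        # optional flags are supplied, then exercise the update command once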
resource = 'routertype'
cmd = routertype.RouterTypeUpdate(test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = [myid]
expected = {}
if name is not None:
args += ['--name', name]
expected['name'] = name
if desc is not None:
args += ['--description', desc]
expected['description'] = desc
if ha_enabled is not None:
args += ['--ha_enabled']
expected['ha_enabled_by_default'] = True
if haenabled is not None:
args += ['--ha-enabled']
expected['ha_enabled_by_default'] = True
if ha_disabled is not None:
args += ['--ha_disabled']
expected['ha_enabled_by_default'] = False
if hadisabled is not None:
args += ['--ha-disabled']
expected['ha_enabled_by_default'] = False
if shared is not None:
args += ['--shared']
expected['shared'] = True
if unshared is not None:
args += ['--unshared']
expected['shared'] = False
if slot_need is not None:
args += ['--slot_need', slot_need]
expected['slot_need'] = slot_need
if slotneed is not None:
args += ['--slot-need', slotneed]
expected['slot_need'] = slotneed
self._test_update_resource(resource, cmd, myid, args, expected)
def test_update_router_type_name(self):
"""Update routertype: myid --name myname."""
self._test_update_router_type(name='myname')
def test_update_router_type_description(self):
self._test_update_router_type(desc='some description')
def test_update_router_type_ha(self):
self._test_update_router_type(ha_enabled=True)
self._test_update_router_type(haenabled=True)
self._test_update_router_type(ha_disabled=True)
self._test_update_router_type(hadisabled=True)
def test_update_router_type_sharing(self):
self._test_update_router_type(shared=True)
self._test_update_router_type(unshared=True)
def test_update_router_type_slots(self):
self._test_update_router_type(slot_need='5')
self._test_update_router_type(slotneed='5')
def test_update_router_type_full(self):
self._test_update_router_type(name='myname', desc='some description',
ha_enabled=True, shared=True,
slot_need='5')
self._test_update_router_type(name='myname', desc='some description',
haenabled=True, shared=True,
slotneed='5')
self._test_update_router_type(name='myname', desc='some description',
ha_disabled=True, unshared=True,
slot_need='5')
self._test_update_router_type(name='myname', desc='some description',
hadisabled=True, unshared=True,
slotneed='5')
def test_delete_router_type(self):
"""Delete routertype: myid."""
resource = 'routertype'
cmd = routertype.RouterTypeDelete(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
def test_show_router_type(self):
"""Show routertype: myid."""
resource = 'routertype'
cmd = routertype.RouterTypeShow(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args,
                                 ['id', 'name'])