hunk (dict) | file (string, lengths 0–11.8M) | file_path (string, lengths 2–234) | label (int64, 0–1) | commit_url (string, lengths 74–103) | dependency_score (sequence, length 5) |
---|---|---|---|---|---|
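Each row below pairs one JSON "hunk" record with a candidate source file, its path, a 0/1 label, the source commit URL, and a five-element dependency_score vector. A minimal sketch of decoding such a record and applying its edit — assuming, since the dump itself does not document this, that `labels` runs parallel to `code_window` and that for `type: "add"` the `after_edit` lines are inserted immediately after the line labeled `add`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hunk mirrors the JSON records in the "hunk" column of this dump.
type Hunk struct {
	ID               int      `json:"id"`
	CodeWindow       []string `json:"code_window"`
	Labels           []string `json:"labels"`
	AfterEdit        []string `json:"after_edit"`
	FilePath         string   `json:"file_path"`
	Type             string   `json:"type"`
	EditStartLineIdx int      `json:"edit_start_line_idx"`
}

// applyAdd keeps every window line and splices after_edit in after the
// line labeled "add" (assumed semantics for type "add" records).
func applyAdd(h Hunk) []string {
	var out []string
	for i, line := range h.CodeWindow {
		out = append(out, line)
		if i < len(h.Labels) && h.Labels[i] == "add" {
			out = append(out, h.AfterEdit...)
		}
	}
	return out
}

func main() {
	raw := `{"id":1,"code_window":["a\n","b\n"],"labels":["keep","add"],` +
		`"after_edit":["c\n"],"file_path":"x.go","type":"add","edit_start_line_idx":0}`
	var h Hunk
	if err := json.Unmarshal([]byte(raw), &h); err != nil {
		panic(err)
	}
	for _, l := range applyAdd(h) {
		fmt.Print(l) // prints a, b, c in order
	}
}
```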
{
"id": 1,
"code_window": [
"\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n",
"\t\"k8s.io/kubernetes/pkg/types\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\tnodeutil \"k8s.io/kubernetes/pkg/util/node\"\n",
")\n",
"\n",
"// getRootDir returns the full path to the directory under which kubelet can\n",
"// store data. These functions are useful to pass interfaces to other modules\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvolumeutil \"k8s.io/kubernetes/pkg/volume/util\"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 32
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"io/ioutil"
"net"
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)
// getRootDir returns the full path to the directory under which kubelet can
// store data. These functions are useful to pass interfaces to other modules
// that may need to know where to write data without getting a whole kubelet
// instance.
func (kl *Kubelet) getRootDir() string {
return kl.rootDirectory
}
// getPodsDir returns the full path to the directory under which pod
// directories are created.
func (kl *Kubelet) getPodsDir() string {
return path.Join(kl.getRootDir(), options.DefaultKubeletPodsDirName)
}
// getPluginsDir returns the full path to the directory under which plugin
// directories are created. Plugins can use these directories for data that
// they need to persist. Plugins should create subdirectories under this named
// after their own names.
func (kl *Kubelet) getPluginsDir() string {
return path.Join(kl.getRootDir(), options.DefaultKubeletPluginsDirName)
}
// getPluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getPodPluginDir.
func (kl *Kubelet) getPluginDir(pluginName string) string {
return path.Join(kl.getPluginsDir(), pluginName)
}
// GetPodDir returns the full path to the per-pod data directory for the
// specified pod. This directory may not exist if the pod does not exist.
func (kl *Kubelet) GetPodDir(podUID types.UID) string {
return kl.getPodDir(podUID)
}
// getPodDir returns the full path to the per-pod directory for the pod with
// the given UID.
func (kl *Kubelet) getPodDir(podUID types.UID) string {
// Backwards compat. The "old" stuff should be removed before 1.0
// release. The thinking here is this:
// !old && !new = use new
// !old && new = use new
// old && !new = use old
// old && new = use new (but warn)
oldPath := path.Join(kl.getRootDir(), string(podUID))
oldExists := dirExists(oldPath)
newPath := path.Join(kl.getPodsDir(), string(podUID))
newExists := dirExists(newPath)
if oldExists && !newExists {
return oldPath
}
if oldExists {
glog.Warningf("Data dir for pod %q exists in both old and new form, using new", podUID)
}
return newPath
}
// getPodVolumesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string {
return path.Join(kl.getPodDir(podUID), options.DefaultKubeletVolumesDirName)
}
// getPodVolumeDir returns the full path to the directory which represents the
// named volume under the named plugin for specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return path.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName)
}
// getPodPluginsDir returns the full path to the per-pod data directory under
// which plugins may store data for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string {
return path.Join(kl.getPodDir(podUID), options.DefaultKubeletPluginsDirName)
}
// getPodPluginDir returns a data directory name for a given plugin name for a
// given pod UID. Plugins can use these directories to store data that they
// need to persist. For non-per-pod plugin data, see getPluginDir.
func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string {
return path.Join(kl.getPodPluginsDir(podUID), pluginName)
}
// getPodContainerDir returns the full path to the per-pod data directory under
// which container data is held for the specified pod. This directory may not
// exist if the pod or container does not exist.
func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
// Backwards compat. The "old" stuff should be removed before 1.0
// release. The thinking here is this:
// !old && !new = use new
// !old && new = use new
// old && !new = use old
// old && new = use new (but warn)
oldPath := path.Join(kl.getPodDir(podUID), ctrName)
oldExists := dirExists(oldPath)
newPath := path.Join(kl.getPodDir(podUID), options.DefaultKubeletContainersDirName, ctrName)
newExists := dirExists(newPath)
if oldExists && !newExists {
return oldPath
}
if oldExists {
glog.Warningf("Data dir for pod %q, container %q exists in both old and new form, using new", podUID, ctrName)
}
return newPath
}
// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*v1.Pod {
return kl.podManager.GetPods()
}
// GetRunningPods returns all pods running on kubelet from looking at the
// container runtime cache. This function converts kubecontainer.Pod to
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods()
if err != nil {
return nil, err
}
apiPods := make([]*v1.Pod, 0, len(pods))
for _, pod := range pods {
apiPods = append(apiPods, pod.ToAPIPod())
}
return apiPods, nil
}
// GetPodByFullName gets the pod with the given 'full' name, which
// incorporates the namespace as well as whether the pod was found.
func (kl *Kubelet) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
return kl.podManager.GetPodByFullName(podFullName)
}
// GetPodByName provides the first pod that matches namespace and name, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return kl.podManager.GetPodByName(namespace, name)
}
// GetHostname Returns the hostname as the kubelet sees it.
func (kl *Kubelet) GetHostname() string {
return kl.hostname
}
// GetRuntime returns the current Runtime implementation in use by the kubelet. This func
// is exported to simplify integration with third party kubelet extensions (e.g. kubernetes-mesos).
func (kl *Kubelet) GetRuntime() kubecontainer.Runtime {
return kl.containerRuntime
}
// GetNode returns the node info for the configured node name of this Kubelet.
func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.standaloneMode {
return kl.initialNode()
}
return kl.nodeInfo.GetNodeInfo(string(kl.nodeName))
}
// getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates().
// The *v1.Node is obtained as follows:
// Return kubelet's nodeInfo for this node, except on error or if in standalone mode,
// in which case return a manufactured nodeInfo representing a node with no pods,
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if !kl.standaloneMode {
if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil {
return n, nil
}
}
return kl.initialNode()
}
// GetNodeConfig returns the container manager node config.
func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
return kl.containerManager.GetNodeConfig()
}
// Returns host IP or nil in case of error.
func (kl *Kubelet) GetHostIP() (net.IP, error) {
node, err := kl.GetNode()
if err != nil {
return nil, fmt.Errorf("cannot get node: %v", err)
}
return nodeutil.GetNodeHostIP(node)
}
// getHostIPAnyway attempts to return the host IP from kubelet's nodeInfo, or
// the initialNode.
func (kl *Kubelet) getHostIPAnyWay() (net.IP, error) {
node, err := kl.getNodeAnyWay()
if err != nil {
return nil, err
}
return nodeutil.GetNodeHostIP(node)
}
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return kl.volumeManager.GetExtraSupplementalGroupsForPod(pod)
}
// getPodVolumePathListFromDisk returns a list of the volume paths by reading the
// volume directories for the given pod from the disk.
func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {
volumes := []string{}
podVolDir := kl.getPodVolumesDir(podUID)
volumePluginDirs, err := ioutil.ReadDir(podVolDir)
if err != nil {
glog.Errorf("Could not read directory %s: %v", podVolDir, err)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {
volumePluginName := volumePluginDir.Name()
volumePluginPath := path.Join(podVolDir, volumePluginName)
volumeDirs, err := util.ReadDirNoStat(volumePluginPath)
if err != nil {
return volumes, fmt.Errorf("Could not read directory %s: %v", volumePluginPath, err)
}
for _, volumeDir := range volumeDirs {
volumes = append(volumes, path.Join(volumePluginPath, volumeDir))
}
}
return volumes, nil
}
| pkg/kubelet/kubelet_getters.go | 1 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.997717022895813,
0.149940624833107,
0.00016988308925647289,
0.0008062288397923112,
0.35184040665626526
] |
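This first row carries label 1 and its file_path matches the hunk's own file_path; its dependency_score leads with 0.9977, while the label-0 rows that follow lead with values near 1e-4. Under the unverified assumption that the first entry scores hunk-to-file relevance, a toy ranking over the four rows of hunk id 1 looks like:

```go
package main

import "fmt"

func main() {
	// Paths and first dependency_score entries copied from the four
	// rows of hunk id 1 in this dump.
	paths := []string{
		"pkg/kubelet/kubelet_getters.go",
		"pkg/client/cache/doc.go",
		"staging/src/k8s.io/client-go/pkg/genericapiserver/openapi/common/doc.go",
		"vendor/golang.org/x/net/context/context.go",
	}
	first := []float64{0.997717, 0.000664, 0.000177, 0.000704}

	best, bestScore := "", -1.0
	for i, s := range first {
		if s > bestScore {
			best, bestScore = paths[i], s
		}
	}
	fmt.Printf("top candidate: %s (%.4f)\n", best, bestScore)
	// Output: top candidate: pkg/kubelet/kubelet_getters.go (0.9977)
}
```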
{
"id": 1,
"code_window": [
"\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n",
"\t\"k8s.io/kubernetes/pkg/types\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\tnodeutil \"k8s.io/kubernetes/pkg/util/node\"\n",
")\n",
"\n",
"// getRootDir returns the full path to the directory under which kubelet can\n",
"// store data. These functions are useful to pass interfaces to other modules\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvolumeutil \"k8s.io/kubernetes/pkg/volume/util\"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 32
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package cache is a client-side caching mechanism. It is useful for
// reducing the number of server calls you'd otherwise need to make.
// Reflector watches a server and updates a Store. Two stores are provided;
// one that simply caches objects (for example, to allow a scheduler to
// list currently available nodes), and one that additionally acts as
// a FIFO queue (for example, to allow a scheduler to process incoming
// pods).
package cache // import "k8s.io/kubernetes/pkg/client/cache"
| pkg/client/cache/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.0006637618644163013,
0.00033642572816461325,
0.00016968962154351175,
0.00017582569853402674,
0.0002314751618541777
] |
{
"id": 1,
"code_window": [
"\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n",
"\t\"k8s.io/kubernetes/pkg/types\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\tnodeutil \"k8s.io/kubernetes/pkg/util/node\"\n",
")\n",
"\n",
"// getRootDir returns the full path to the directory under which kubelet can\n",
"// store data. These functions are useful to pass interfaces to other modules\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvolumeutil \"k8s.io/kubernetes/pkg/volume/util\"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 32
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// package common holds shared codes and types between open API code generator and spec generator.
package common
| staging/src/k8s.io/client-go/pkg/genericapiserver/openapi/common/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.0001767417707014829,
0.00017631685477681458,
0.00017589193885214627,
0.00017631685477681458,
4.2491592466831207e-7
] |
{
"id": 1,
"code_window": [
"\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n",
"\t\"k8s.io/kubernetes/pkg/types\"\n",
"\t\"k8s.io/kubernetes/pkg/util\"\n",
"\tnodeutil \"k8s.io/kubernetes/pkg/util/node\"\n",
")\n",
"\n",
"// getRootDir returns the full path to the directory under which kubelet can\n",
"// store data. These functions are useful to pass interfaces to other modules\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tvolumeutil \"k8s.io/kubernetes/pkg/volume/util\"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 32
} | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stores using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
return background
}
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
return todo
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()
| vendor/golang.org/x/net/context/context.go | 0 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.0007041056524030864,
0.0002359258505748585,
0.0001615736837266013,
0.00016698594845365733,
0.00017497182125225663
] |
{
"id": 2,
"code_window": [
"// getPodVolumePathListFromDisk returns a list of the volume paths by reading the\n",
"// volume directories for the given pod from the disk.\n",
"func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {\n",
"\tvolumes := []string{}\n",
"\tpodVolDir := kl.getPodVolumesDir(podUID)\n",
"\tvolumePluginDirs, err := ioutil.ReadDir(podVolDir)\n",
"\tif err != nil {\n",
"\t\tglog.Errorf(\"Could not read directory %s: %v\", podVolDir, err)\n",
"\t\treturn volumes, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tif pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {\n",
"\t\treturn volumes, fmt.Errorf(\"Error checking if path %q exists: %v\", podVolDir, pathErr)\n",
"\t} else if !pathExists {\n",
"\t\tglog.Warningf(\"Warning: path %q does not exist: %q\", podVolDir)\n",
"\t\treturn volumes, nil\n",
"\t}\n",
"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 246
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"fmt"
"io/ioutil"
"net"
"path"
"github.com/golang/glog"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)
// getRootDir returns the full path to the directory under which kubelet can
// store data. These functions are useful to pass interfaces to other modules
// that may need to know where to write data without getting a whole kubelet
// instance.
func (kl *Kubelet) getRootDir() string {
return kl.rootDirectory
}
// getPodsDir returns the full path to the directory under which pod
// directories are created.
func (kl *Kubelet) getPodsDir() string {
return path.Join(kl.getRootDir(), options.DefaultKubeletPodsDirName)
}
// getPluginsDir returns the full path to the directory under which plugin
// directories are created. Plugins can use these directories for data that
// they need to persist. Plugins should create subdirectories under this named
// after their own names.
func (kl *Kubelet) getPluginsDir() string {
return path.Join(kl.getRootDir(), options.DefaultKubeletPluginsDirName)
}
// getPluginDir returns a data directory name for a given plugin name.
// Plugins can use these directories to store data that they need to persist.
// For per-pod plugin data, see getPodPluginDir.
func (kl *Kubelet) getPluginDir(pluginName string) string {
return path.Join(kl.getPluginsDir(), pluginName)
}
// GetPodDir returns the full path to the per-pod data directory for the
// specified pod. This directory may not exist if the pod does not exist.
func (kl *Kubelet) GetPodDir(podUID types.UID) string {
return kl.getPodDir(podUID)
}
// getPodDir returns the full path to the per-pod directory for the pod with
// the given UID.
func (kl *Kubelet) getPodDir(podUID types.UID) string {
// Backwards compat. The "old" stuff should be removed before 1.0
// release. The thinking here is this:
// !old && !new = use new
// !old && new = use new
// old && !new = use old
// old && new = use new (but warn)
oldPath := path.Join(kl.getRootDir(), string(podUID))
oldExists := dirExists(oldPath)
newPath := path.Join(kl.getPodsDir(), string(podUID))
newExists := dirExists(newPath)
if oldExists && !newExists {
return oldPath
}
if oldExists {
glog.Warningf("Data dir for pod %q exists in both old and new form, using new", podUID)
}
return newPath
}
// getPodVolumesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string {
return path.Join(kl.getPodDir(podUID), options.DefaultKubeletVolumesDirName)
}
// getPodVolumeDir returns the full path to the directory which represents the
// named volume under the named plugin for specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
return path.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName)
}
// getPodPluginsDir returns the full path to the per-pod data directory under
// which plugins may store data for the specified pod. This directory may not
// exist if the pod does not exist.
func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string {
return path.Join(kl.getPodDir(podUID), options.DefaultKubeletPluginsDirName)
}
// getPodPluginDir returns a data directory name for a given plugin name for a
// given pod UID. Plugins can use these directories to store data that they
// need to persist. For non-per-pod plugin data, see getPluginDir.
func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string {
return path.Join(kl.getPodPluginsDir(podUID), pluginName)
}
// getPodContainerDir returns the full path to the per-pod data directory under
// which container data is held for the specified pod. This directory may not
// exist if the pod or container does not exist.
func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
// Backwards compat. The "old" stuff should be removed before 1.0
// release. The thinking here is this:
// !old && !new = use new
// !old && new = use new
// old && !new = use old
// old && new = use new (but warn)
oldPath := path.Join(kl.getPodDir(podUID), ctrName)
oldExists := dirExists(oldPath)
newPath := path.Join(kl.getPodDir(podUID), options.DefaultKubeletContainersDirName, ctrName)
newExists := dirExists(newPath)
if oldExists && !newExists {
return oldPath
}
if oldExists {
glog.Warningf("Data dir for pod %q, container %q exists in both old and new form, using new", podUID, ctrName)
}
return newPath
}
// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*v1.Pod {
return kl.podManager.GetPods()
}
// GetRunningPods returns all pods running on kubelet from looking at the
// container runtime cache. This function converts kubecontainer.Pod to
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods()
if err != nil {
return nil, err
}
apiPods := make([]*v1.Pod, 0, len(pods))
for _, pod := range pods {
apiPods = append(apiPods, pod.ToAPIPod())
}
return apiPods, nil
}
// GetPodByFullName gets the pod with the given 'full' name, which
// incorporates the namespace as well as whether the pod was found.
func (kl *Kubelet) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
return kl.podManager.GetPodByFullName(podFullName)
}
// GetPodByName provides the first pod that matches namespace and name, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return kl.podManager.GetPodByName(namespace, name)
}
// GetHostname Returns the hostname as the kubelet sees it.
func (kl *Kubelet) GetHostname() string {
return kl.hostname
}
// GetRuntime returns the current Runtime implementation in use by the kubelet. This func
// is exported to simplify integration with third party kubelet extensions (e.g. kubernetes-mesos).
func (kl *Kubelet) GetRuntime() kubecontainer.Runtime {
return kl.containerRuntime
}
// GetNode returns the node info for the configured node name of this Kubelet.
func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.standaloneMode {
return kl.initialNode()
}
return kl.nodeInfo.GetNodeInfo(string(kl.nodeName))
}
// getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates().
// The *v1.Node is obtained as follows:
// Return kubelet's nodeInfo for this node, except on error or if in standalone mode,
// in which case return a manufactured nodeInfo representing a node with no pods,
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if !kl.standaloneMode {
if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil {
return n, nil
}
}
return kl.initialNode()
}
// GetNodeConfig returns the container manager node config.
func (kl *Kubelet) GetNodeConfig() cm.NodeConfig {
return kl.containerManager.GetNodeConfig()
}
// Returns host IP or nil in case of error.
func (kl *Kubelet) GetHostIP() (net.IP, error) {
node, err := kl.GetNode()
if err != nil {
return nil, fmt.Errorf("cannot get node: %v", err)
}
return nodeutil.GetNodeHostIP(node)
}
// getHostIPAnyway attempts to return the host IP from kubelet's nodeInfo, or
// the initialNode.
func (kl *Kubelet) getHostIPAnyWay() (net.IP, error) {
node, err := kl.getNodeAnyWay()
if err != nil {
return nil, err
}
return nodeutil.GetNodeHostIP(node)
}
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return kl.volumeManager.GetExtraSupplementalGroupsForPod(pod)
}
// getPodVolumePathListFromDisk returns a list of the volume paths by reading the
// volume directories for the given pod from the disk.
func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {
volumes := []string{}
podVolDir := kl.getPodVolumesDir(podUID)
volumePluginDirs, err := ioutil.ReadDir(podVolDir)
if err != nil {
glog.Errorf("Could not read directory %s: %v", podVolDir, err)
return volumes, err
}
for _, volumePluginDir := range volumePluginDirs {
volumePluginName := volumePluginDir.Name()
volumePluginPath := path.Join(podVolDir, volumePluginName)
volumeDirs, err := util.ReadDirNoStat(volumePluginPath)
if err != nil {
return volumes, fmt.Errorf("Could not read directory %s: %v", volumePluginPath, err)
}
for _, volumeDir := range volumeDirs {
volumes = append(volumes, path.Join(volumePluginPath, volumeDir))
}
}
return volumes, nil
}
| pkg/kubelet/kubelet_getters.go | 1 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.9992824196815491,
0.12200217694044113,
0.00017009623115882277,
0.004293490666896105,
0.30569469928741455
] |
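Note that the after_edit text of hunk id 2, as captured here, calls `glog.Warningf` with two `%q` verbs but only one argument (`podVolDir`), which `go vet` would flag as a format-string mismatch; the dump preserves the commit's text verbatim. For illustration only — not the commit's code — a corrected form of the guard, dropped into `getPodVolumePathListFromDisk` alongside the `volumeutil` import added by hunk id 1, would read:

```go
// Fragment: relies on the surrounding file's fmt, glog, and volumeutil imports.
if pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {
	return volumes, fmt.Errorf("error checking if path %q exists: %v", podVolDir, pathErr)
} else if !pathExists {
	// One %q verb for the single argument, unlike the dataset's after_edit text.
	glog.Warningf("path %q does not exist", podVolDir)
	return volumes, nil
}
```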
{
"id": 2,
"code_window": [
"// getPodVolumePathListFromDisk returns a list of the volume paths by reading the\n",
"// volume directories for the given pod from the disk.\n",
"func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {\n",
"\tvolumes := []string{}\n",
"\tpodVolDir := kl.getPodVolumesDir(podUID)\n",
"\tvolumePluginDirs, err := ioutil.ReadDir(podVolDir)\n",
"\tif err != nil {\n",
"\t\tglog.Errorf(\"Could not read directory %s: %v\", podVolDir, err)\n",
"\t\treturn volumes, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tif pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {\n",
"\t\treturn volumes, fmt.Errorf(\"Error checking if path %q exists: %v\", podVolDir, pathErr)\n",
"\t} else if !pathExists {\n",
"\t\tglog.Warningf(\"Warning: path %q does not exist: %q\", podVolDir)\n",
"\t\treturn volumes, nil\n",
"\t}\n",
"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 246
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2alpha1
import (
"k8s.io/client-go/pkg/api/v1"
metav1 "k8s.io/client-go/pkg/apis/meta/v1"
)
// +genclient=true
// Job represents the configuration of a single job.
type Job struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// +optional
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec is a structure defining the expected behavior of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional
Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status is a structure describing current status of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional
Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// JobList is a collection of jobs.
type JobList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of Job.
Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// JobTemplate describes a template for creating copies of a predefined pod.
type JobTemplate struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// +optional
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Template defines jobs that will be created from this template
// http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional
Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// JobTemplateSpec describes the data a Job should have when created from a template
type JobTemplateSpec struct {
// Standard object's metadata of the jobs created from this template.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// +optional
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional
Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// JobSpec describes how the job execution will look like.
type JobSpec struct {
// Parallelism specifies the maximum desired number of pods the job should
// run at any given time. The actual number of pods running in steady state will
// be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism),
// i.e. when the work left to do is less than max parallelism.
// More info: http://kubernetes.io/docs/user-guide/jobs
// +optional
Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"`
// Completions specifies the desired number of successfully finished pods the
// job should be run with. Setting to nil means that the success of any
// pod signals the success of all pods, and allows parallelism to have any positive
// value. Setting to 1 means that parallelism is limited to 1 and the success of that
// pod signals the success of the job.
// More info: http://kubernetes.io/docs/user-guide/jobs
// +optional
Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"`
// Optional duration in seconds relative to the startTime that the job may be active
// before the system tries to terminate it; value must be positive integer
// +optional
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"`
// Selector is a label query over pods that should match the pod count.
// Normally, the system sets this field for you.
// More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
// ManualSelector controls generation of pod labels and pod selectors.
// Leave `manualSelector` unset unless you are certain what you are doing.
// When false or unset, the system pick labels unique to this job
// and appends those labels to the pod template. When true,
// the user is responsible for picking unique labels and specifying
// the selector. Failure to pick a unique label may cause this
// and other jobs to not function correctly. However, You may see
// `manualSelector=true` in jobs that were created with the old `extensions/v1beta1`
// API.
// More info: http://releases.k8s.io/HEAD/docs/design/selector-generation.md
// +optional
ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"`
// Template is the object that describes the pod that will be created when
// executing a job.
// More info: http://kubernetes.io/docs/user-guide/jobs
Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"`
}
// JobStatus represents the current state of a Job.
type JobStatus struct {
// Conditions represent the latest available observations of an object's current state.
// More info: http://kubernetes.io/docs/user-guide/jobs
// +optional
Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
// StartTime represents time when the job was acknowledged by the Job Manager.
// It is not guaranteed to be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
// CompletionTime represents time when the job was completed. It is not guaranteed to
// be set in happens-before order across separate operations.
// It is represented in RFC3339 form and is in UTC.
// +optional
CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"`
// Active is the number of actively running pods.
// +optional
Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"`
// Succeeded is the number of pods which reached Phase Succeeded.
// +optional
Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"`
// Failed is the number of pods which reached Phase Failed.
// +optional
Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"`
}
type JobConditionType string
// These are valid conditions of a job.
const (
// JobComplete means the job has completed its execution.
JobComplete JobConditionType = "Complete"
// JobFailed means the job has failed its execution.
JobFailed JobConditionType = "Failed"
)
// JobCondition describes current state of a job.
type JobCondition struct {
// Type of job condition, Complete or Failed.
Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"`
// Status of the condition, one of True, False, Unknown.
Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
// Last time the condition was checked.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// +genclient=true
// CronJob represents the configuration of a single cron job.
type CronJob struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// +optional
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec is a structure defining the expected behavior of a job, including the schedule.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional
Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status is a structure describing current status of a job.
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
// +optional
Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// CronJobList is a collection of cron jobs.
type CronJobList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of CronJob.
Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// CronJobSpec describes how the job execution will look like and when it will actually run.
type CronJobSpec struct {
// Schedule contains the schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.
Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"`
// Optional deadline in seconds for starting the job if it misses scheduled
// time for any reason. Missed jobs executions will be counted as failed ones.
// +optional
StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"`
// ConcurrencyPolicy specifies how to treat concurrent executions of a Job.
// +optional
ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"`
// Suspend flag tells the controller to suspend subsequent executions, it does
// not apply to already started executions. Defaults to false.
// +optional
Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"`
// JobTemplate is the object that describes the job that will be created when
// executing a CronJob.
JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"`
}
// ConcurrencyPolicy describes how the job will be handled.
// Only one of the following concurrent policies may be specified.
// If none of the following policies is specified, the default one
// is AllowConcurrent.
type ConcurrencyPolicy string
const (
// AllowConcurrent allows CronJobs to run concurrently.
AllowConcurrent ConcurrencyPolicy = "Allow"
// ForbidConcurrent forbids concurrent runs, skipping next run if previous
// hasn't finished yet.
ForbidConcurrent ConcurrencyPolicy = "Forbid"
// ReplaceConcurrent cancels currently running job and replaces it with a new one.
ReplaceConcurrent ConcurrencyPolicy = "Replace"
)
// CronJobStatus represents the current state of a cron job.
type CronJobStatus struct {
// Active holds pointers to currently running jobs.
// +optional
Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"`
// LastScheduleTime keeps information of when was the last time the job was successfully scheduled.
// +optional
LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"`
}
| staging/src/k8s.io/client-go/pkg/apis/batch/v2alpha1/types.go | 0 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.00019466495723463595,
0.00017003175162244588,
0.00016389873053412884,
0.00016715774836484343,
0.000007067448677844368
] |
{
"id": 2,
"code_window": [
"// getPodVolumePathListFromDisk returns a list of the volume paths by reading the\n",
"// volume directories for the given pod from the disk.\n",
"func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {\n",
"\tvolumes := []string{}\n",
"\tpodVolDir := kl.getPodVolumesDir(podUID)\n",
"\tvolumePluginDirs, err := ioutil.ReadDir(podVolDir)\n",
"\tif err != nil {\n",
"\t\tglog.Errorf(\"Could not read directory %s: %v\", podVolDir, err)\n",
"\t\treturn volumes, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tif pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {\n",
"\t\treturn volumes, fmt.Errorf(\"Error checking if path %q exists: %v\", podVolDir, pathErr)\n",
"\t} else if !pathExists {\n",
"\t\tglog.Warningf(\"Warning: path %q does not exist: %q\", podVolDir)\n",
"\t\treturn volumes, nil\n",
"\t}\n",
"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 246
} | package configs
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"time"
"github.com/Sirupsen/logrus"
)
type Rlimit struct {
Type int `json:"type"`
Hard uint64 `json:"hard"`
Soft uint64 `json:"soft"`
}
// IDMap represents UID/GID Mappings for User Namespaces.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
// Seccomp represents syscall restrictions
// By default, only the native architecture of the kernel is allowed to be used
// for syscalls. Additional architectures can be added by specifying them in
// Architectures.
type Seccomp struct {
DefaultAction Action `json:"default_action"`
Architectures []string `json:"architectures"`
Syscalls []*Syscall `json:"syscalls"`
}
// Action is taken upon rule match in Seccomp
type Action int
const (
Kill Action = iota + 1
Errno
Trap
Allow
Trace
)
// Operator is a comparison operator to be used when matching syscall arguments in Seccomp
type Operator int
const (
EqualTo Operator = iota + 1
NotEqualTo
GreaterThan
GreaterThanOrEqualTo
LessThan
LessThanOrEqualTo
MaskEqualTo
)
// Arg is a rule to match a specific syscall argument in Seccomp
type Arg struct {
Index uint `json:"index"`
Value uint64 `json:"value"`
ValueTwo uint64 `json:"value_two"`
Op Operator `json:"op"`
}
// Syscall is a rule to match a syscall in Seccomp
type Syscall struct {
Name string `json:"name"`
Action Action `json:"action"`
Args []*Arg `json:"args"`
}
// TODO Windows. Many of these fields should be factored out into those parts
// which are common across platforms, and those which are platform specific.
// Config defines configuration options for executing a process inside a contained environment.
type Config struct {
// NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs
// This is a common option when the container is running in ramdisk
NoPivotRoot bool `json:"no_pivot_root"`
// ParentDeathSignal specifies the signal that is sent to the container's process in the case
// that the parent process dies.
ParentDeathSignal int `json:"parent_death_signal"`
// PivotDir allows a custom directory inside the container's root filesystem to be used as pivot, when NoPivotRoot is not set.
// When a custom PivotDir not set, a temporary dir inside the root filesystem will be used. The pivot dir needs to be writeable.
// This is required when using read only root filesystems. In these cases, a read/writeable path can be (bind) mounted somewhere inside the root filesystem to act as pivot.
PivotDir string `json:"pivot_dir"`
// Path to a directory containing the container's root filesystem.
Rootfs string `json:"rootfs"`
// Readonlyfs will remount the container's rootfs as readonly where only externally mounted
// bind mounts are writtable.
Readonlyfs bool `json:"readonlyfs"`
// Specifies the mount propagation flags to be applied to /.
RootPropagation int `json:"rootPropagation"`
// Mounts specify additional source and destination paths that will be mounted inside the container's
// rootfs and mount namespace if specified
Mounts []*Mount `json:"mounts"`
// The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well!
Devices []*Device `json:"devices"`
MountLabel string `json:"mount_label"`
// Hostname optionally sets the container's hostname if provided
Hostname string `json:"hostname"`
// Namespaces specifies the container's namespaces that it should setup when cloning the init process
// If a namespace is not provided that namespace is shared from the container's parent process
Namespaces Namespaces `json:"namespaces"`
// Capabilities specify the capabilities to keep when executing the process inside the container
// All capbilities not specified will be dropped from the processes capability mask
Capabilities []string `json:"capabilities"`
// Networks specifies the container's network setup to be created
Networks []*Network `json:"networks"`
// Routes can be specified to create entries in the route table as the container is started
Routes []*Route `json:"routes"`
// Cgroups specifies specific cgroup settings for the various subsystems that the container is
// placed into to limit the resources the container has available
Cgroups *Cgroup `json:"cgroups"`
// AppArmorProfile specifies the profile to apply to the process running in the container and is
// change at the time the process is execed
AppArmorProfile string `json:"apparmor_profile,omitempty"`
// ProcessLabel specifies the label to apply to the process running in the container. It is
// commonly used by selinux
ProcessLabel string `json:"process_label,omitempty"`
// Rlimits specifies the resource limits, such as max open files, to set in the container
// If Rlimits are not set, the container will inherit rlimits from the parent process
Rlimits []Rlimit `json:"rlimits,omitempty"`
// OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores
// for a process. Valid values are between the range [-1000, '1000'], where processes with
// higher scores are preferred for being killed.
// More information about kernel oom score calculation here: https://lwn.net/Articles/317814/
OomScoreAdj int `json:"oom_score_adj"`
// UidMappings is an array of User ID mappings for User Namespaces
UidMappings []IDMap `json:"uid_mappings"`
// GidMappings is an array of Group ID mappings for User Namespaces
GidMappings []IDMap `json:"gid_mappings"`
// MaskPaths specifies paths within the container's rootfs to mask over with a bind
// mount pointing to /dev/null as to prevent reads of the file.
MaskPaths []string `json:"mask_paths"`
// ReadonlyPaths specifies paths within the container's rootfs to remount as read-only
// so that these files prevent any writes.
ReadonlyPaths []string `json:"readonly_paths"`
// Sysctl is a map of properties and their values. It is the equivalent of using
// sysctl -w my.property.name value in Linux.
Sysctl map[string]string `json:"sysctl"`
// Seccomp allows actions to be taken whenever a syscall is made within the container.
// A number of rules are given, each having an action to be taken if a syscall matches it.
// A default action to be taken if no rules match is also given.
Seccomp *Seccomp `json:"seccomp"`
// NoNewPrivileges controls whether processes in the container can gain additional privileges.
NoNewPrivileges bool `json:"no_new_privileges,omitempty"`
// Hooks are a collection of actions to perform at various container lifecycle events.
// CommandHooks are serialized to JSON, but other hooks are not.
Hooks *Hooks
// Version is the version of opencontainer specification that is supported.
Version string `json:"version"`
// Labels are user defined metadata that is stored in the config and populated on the state
Labels []string `json:"labels"`
// NoNewKeyring will not allocated a new session keyring for the container. It will use the
// callers keyring in this case.
NoNewKeyring bool `json:"no_new_keyring"`
}
type Hooks struct {
// Prestart commands are executed after the container namespaces are created,
// but before the user supplied command is executed from init.
Prestart []Hook
// Poststart commands are executed after the container init process starts.
Poststart []Hook
// Poststop commands are executed after the container init process exits.
Poststop []Hook
}
func (hooks *Hooks) UnmarshalJSON(b []byte) error {
var state struct {
Prestart []CommandHook
Poststart []CommandHook
Poststop []CommandHook
}
if err := json.Unmarshal(b, &state); err != nil {
return err
}
deserialize := func(shooks []CommandHook) (hooks []Hook) {
for _, shook := range shooks {
hooks = append(hooks, shook)
}
return hooks
}
hooks.Prestart = deserialize(state.Prestart)
hooks.Poststart = deserialize(state.Poststart)
hooks.Poststop = deserialize(state.Poststop)
return nil
}
func (hooks Hooks) MarshalJSON() ([]byte, error) {
serialize := func(hooks []Hook) (serializableHooks []CommandHook) {
for _, hook := range hooks {
switch chook := hook.(type) {
case CommandHook:
serializableHooks = append(serializableHooks, chook)
default:
logrus.Warnf("cannot serialize hook of type %T, skipping", hook)
}
}
return serializableHooks
}
return json.Marshal(map[string]interface{}{
"prestart": serialize(hooks.Prestart),
"poststart": serialize(hooks.Poststart),
"poststop": serialize(hooks.Poststop),
})
}
// HookState is the payload provided to a hook on execution.
type HookState struct {
Version string `json:"ociVersion"`
ID string `json:"id"`
Pid int `json:"pid"`
Root string `json:"root"`
BundlePath string `json:"bundlePath"`
}
type Hook interface {
// Run executes the hook with the provided state.
Run(HookState) error
}
// NewFunctionHook will call the provided function when the hook is run.
func NewFunctionHook(f func(HookState) error) FuncHook {
return FuncHook{
run: f,
}
}
type FuncHook struct {
run func(HookState) error
}
func (f FuncHook) Run(s HookState) error {
return f.run(s)
}
type Command struct {
Path string `json:"path"`
Args []string `json:"args"`
Env []string `json:"env"`
Dir string `json:"dir"`
Timeout *time.Duration `json:"timeout"`
}
// NewCommandHook will execute the provided command when the hook is run.
func NewCommandHook(cmd Command) CommandHook {
return CommandHook{
Command: cmd,
}
}
type CommandHook struct {
Command
}
func (c Command) Run(s HookState) error {
b, err := json.Marshal(s)
if err != nil {
return err
}
var stdout, stderr bytes.Buffer
cmd := exec.Cmd{
Path: c.Path,
Args: c.Args,
Env: c.Env,
Stdin: bytes.NewReader(b),
Stdout: &stdout,
Stderr: &stderr,
}
if err := cmd.Start(); err != nil {
return err
}
errC := make(chan error, 1)
go func() {
err := cmd.Wait()
if err != nil {
err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String())
}
errC <- err
}()
var timerCh <-chan time.Time
if c.Timeout != nil {
timer := time.NewTimer(*c.Timeout)
defer timer.Stop()
timerCh = timer.C
}
select {
case err := <-errC:
return err
case <-timerCh:
cmd.Process.Kill()
cmd.Wait()
return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds())
}
}
| vendor/github.com/opencontainers/runc/libcontainer/configs/config.go | 0 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.0002702397177927196,
0.00018078544235322624,
0.0001630910555832088,
0.00017179155838675797,
0.000021222949726507068
] |
{
"id": 2,
"code_window": [
"// getPodVolumePathListFromDisk returns a list of the volume paths by reading the\n",
"// volume directories for the given pod from the disk.\n",
"func (kl *Kubelet) getPodVolumePathListFromDisk(podUID types.UID) ([]string, error) {\n",
"\tvolumes := []string{}\n",
"\tpodVolDir := kl.getPodVolumesDir(podUID)\n",
"\tvolumePluginDirs, err := ioutil.ReadDir(podVolDir)\n",
"\tif err != nil {\n",
"\t\tglog.Errorf(\"Could not read directory %s: %v\", podVolDir, err)\n",
"\t\treturn volumes, err\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\n",
"\tif pathExists, pathErr := volumeutil.PathExists(podVolDir); pathErr != nil {\n",
"\t\treturn volumes, fmt.Errorf(\"Error checking if path %q exists: %v\", podVolDir, pathErr)\n",
"\t} else if !pathExists {\n",
"\t\tglog.Warningf(\"Warning: path %q does not exist: %q\", podVolDir)\n",
"\t\treturn volumes, nil\n",
"\t}\n",
"\n"
],
"file_path": "pkg/kubelet/kubelet_getters.go",
"type": "add",
"edit_start_line_idx": 246
} | /*-
* Copyright 2014 Square Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package josecipher
import (
"crypto"
"encoding/binary"
"hash"
"io"
)
type concatKDF struct {
z, info []byte
i uint32
cache []byte
hasher hash.Hash
}
// NewConcatKDF builds a KDF reader based on the given inputs.
func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader {
buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo)))
n := 0
n += copy(buffer, algID)
n += copy(buffer[n:], ptyUInfo)
n += copy(buffer[n:], ptyVInfo)
n += copy(buffer[n:], supPubInfo)
copy(buffer[n:], supPrivInfo)
hasher := hash.New()
return &concatKDF{
z: z,
info: buffer,
hasher: hasher,
cache: []byte{},
i: 1,
}
}
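// Illustrative usage (a sketch, not part of the original file): deriving a
// 32-byte key with SHA-256. sharedSecret and the info arguments below are
// placeholders; real callers pass the ECDH shared secret as z and the
// JWA-specified party/length info fields.
//
//	kdf := NewConcatKDF(crypto.SHA256, sharedSecret, algID, apu, apv, supPubInfo, nil)
//	key := make([]byte, 32)
//	_, _ = io.ReadFull(kdf, key)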
func (ctx *concatKDF) Read(out []byte) (int, error) {
copied := copy(out, ctx.cache)
ctx.cache = ctx.cache[copied:]
for copied < len(out) {
ctx.hasher.Reset()
// Write on a hash.Hash never fails
_ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i)
_, _ = ctx.hasher.Write(ctx.z)
_, _ = ctx.hasher.Write(ctx.info)
hash := ctx.hasher.Sum(nil)
chunkCopied := copy(out[copied:], hash)
copied += chunkCopied
ctx.cache = hash[chunkCopied:]
ctx.i++
}
return copied, nil
}
| vendor/github.com/square/go-jose/cipher/concat_kdf.go | 0 | https://github.com/kubernetes/kubernetes/commit/3fbf68ef6847264f8dcf1934951ef1cd5548d2a8 | [
0.0002570409851614386,
0.00018258423369843513,
0.00016493092698510736,
0.00017025653505697846,
0.000029001439543208107
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"github.com/pingcap/tidb/pkg/parser/ast\"\n",
"\t\"github.com/pingcap/tidb/pkg/parser/opcode\"\n",
")\n",
"\n",
"// UnCacheableFunctions stores functions which can not be cached to plan cache.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 19
} | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"github.com/pingcap/tidb/pkg/parser/ast"
"github.com/pingcap/tidb/pkg/parser/opcode"
)
// UnCacheableFunctions stores functions which can not be cached to plan cache.
var UnCacheableFunctions = map[string]struct{}{
ast.Database: {},
ast.CurrentUser: {},
ast.CurrentRole: {},
ast.CurrentResourceGroup: {},
ast.User: {},
ast.ConnectionID: {},
ast.LastInsertId: {},
ast.RowCount: {},
ast.Version: {},
ast.Like: {},
// Functions below are incompatible with (non-prep) plan cache; we'll fix them one by one later.
ast.JSONExtract: {}, // cannot pass TestFuncJSON
ast.JSONObject: {},
ast.JSONArray: {},
ast.Coalesce: {},
ast.Convert: {},
ast.TimeLiteral: {},
ast.DateLiteral: {},
ast.TimestampLiteral: {},
ast.AesEncrypt: {}, // affected by @@block_encryption_mode
ast.AesDecrypt: {},
}
// unFoldableFunctions stores functions which cannot be folded during the constant folding stage.
var unFoldableFunctions = map[string]struct{}{
ast.Sysdate: {},
ast.FoundRows: {},
ast.Rand: {},
ast.UUID: {},
ast.Sleep: {},
ast.RowFunc: {},
ast.Values: {},
ast.SetVar: {},
ast.GetVar: {},
ast.GetParam: {},
ast.Benchmark: {},
ast.DayName: {},
ast.NextVal: {},
ast.LastVal: {},
ast.SetVal: {},
ast.AnyValue: {},
}
// DisableFoldFunctions stores functions which prevent child scope functions from being constant folded.
// Typically, these functions shall also exist in unFoldableFunctions, to stop from being folded when they themselves
// are in child scope of an outer function, and the outer function is recursively folding its children.
var DisableFoldFunctions = map[string]struct{}{
ast.Benchmark: {},
}
// TryFoldFunctions stores functions which try to fold constants in child scope functions if no errors/warnings occur;
// otherwise, the child functions do not fold constants.
// Note: the function itself should fold constants.
var TryFoldFunctions = map[string]struct{}{
ast.If: {},
ast.Ifnull: {},
ast.Case: {},
ast.LogicAnd: {},
ast.LogicOr: {},
ast.Coalesce: {},
ast.Interval: {},
}
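// For example (an illustrative case, not taken from the original comments):
// in IFNULL(CAST('abc' AS SIGNED), 0), folding the inner CAST at plan time
// would raise a truncation warning, so the child expression is left unfolded
// and evaluated at runtime, while IFNULL itself can still fold once its
// arguments are constant and warning-free.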
// IllegalFunctions4GeneratedColumns stores functions that are illegal for generated columns.
// See https://github.com/mysql/mysql-server/blob/5.7/mysql-test/suite/gcol/inc/gcol_blocked_sql_funcs_main.inc for details
var IllegalFunctions4GeneratedColumns = map[string]struct{}{
ast.ConnectionID: {},
ast.LoadFile: {},
ast.LastInsertId: {},
ast.Rand: {},
ast.UUID: {},
ast.UUIDShort: {},
ast.Curdate: {},
ast.CurrentDate: {},
ast.Curtime: {},
ast.CurrentTime: {},
ast.CurrentTimestamp: {},
ast.LocalTime: {},
ast.LocalTimestamp: {},
ast.Now: {},
ast.UnixTimestamp: {},
ast.UTCDate: {},
ast.UTCTime: {},
ast.UTCTimestamp: {},
ast.Benchmark: {},
ast.CurrentUser: {},
ast.Database: {},
ast.FoundRows: {},
ast.GetLock: {},
ast.IsFreeLock: {},
ast.IsUsedLock: {},
ast.MasterPosWait: {},
ast.NameConst: {},
ast.ReleaseLock: {},
ast.RowFunc: {},
ast.RowCount: {},
ast.Schema: {},
ast.SessionUser: {},
ast.Sleep: {},
ast.Sysdate: {},
ast.SystemUser: {},
ast.User: {},
ast.Values: {},
ast.Encrypt: {},
ast.Version: {},
ast.JSONMerge: {},
ast.SetVar: {},
ast.GetVar: {},
ast.ReleaseAllLocks: {},
}
// DeferredFunctions stores functions which are foldable but should be deferred as well when plan cache is enabled.
// Note that, these functions must be foldable at first place, i.e, they are not in `unFoldableFunctions`.
var DeferredFunctions = map[string]struct{}{
ast.Now: {},
ast.RandomBytes: {},
ast.CurrentTimestamp: {},
ast.UTCTime: {},
ast.Curtime: {},
ast.CurrentTime: {},
ast.UTCTimestamp: {},
ast.UnixTimestamp: {},
ast.Curdate: {},
ast.CurrentDate: {},
ast.UTCDate: {},
}
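// An illustrative consequence (assumed example, not from the original source):
// with plan cache enabled, a prepared query like
//
//	PREPARE s FROM 'SELECT * FROM t WHERE ts < NOW()';
//	EXECUTE s;
//	EXECUTE s;
//
// must not fold NOW() into a constant at plan-build time; otherwise every
// EXECUTE would silently reuse the timestamp captured by the first execution.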
// AllowedPartitionFuncMap stores functions which can be used in the partition expression.
var AllowedPartitionFuncMap = map[string]struct{}{
ast.ToDays: {},
ast.ToSeconds: {},
ast.DayOfMonth: {},
ast.Month: {},
ast.DayOfYear: {},
ast.Quarter: {},
ast.YearWeek: {},
ast.Year: {},
ast.Weekday: {},
ast.DayOfWeek: {},
ast.Day: {},
ast.Hour: {},
ast.Minute: {},
ast.Second: {},
ast.TimeToSec: {},
ast.MicroSecond: {},
ast.UnixTimestamp: {},
ast.FromDays: {},
ast.Extract: {},
ast.Abs: {},
ast.Ceiling: {},
ast.DateDiff: {},
ast.Floor: {},
ast.Mod: {},
}
// AllowedPartition4BinaryOpMap stores the allowed operators for binary expressions in the partition expression.
// See https://dev.mysql.com/doc/refman/5.7/en/partitioning-limitations.html for more details
var AllowedPartition4BinaryOpMap = map[opcode.Op]struct{}{
opcode.Plus: {},
opcode.Minus: {},
opcode.Mul: {},
opcode.IntDiv: {},
opcode.Mod: {},
}
// AllowedPartition4UnaryOpMap stores the allowed operators for unary expressions in the partition expression.
var AllowedPartition4UnaryOpMap = map[opcode.Op]struct{}{
opcode.Plus: {},
opcode.Minus: {},
}
// inequalFunctions stores functions which cannot be propagated from column equality conditions.
var inequalFunctions = map[string]struct{}{
ast.IsNull: {},
}
// mutableEffectsFunctions stores functions which are mutable or have side effects; specifically,
// we cannot remove them from a filter even if they have duplicates.
var mutableEffectsFunctions = map[string]struct{}{
// Time-related functions in MySQL have various behaviors when executed multiple times in a single SQL statement,
// for example:
// mysql> select current_timestamp(), sleep(5), current_timestamp();
// +---------------------+----------+---------------------+
// | current_timestamp() | sleep(5) | current_timestamp() |
// +---------------------+----------+---------------------+
// | 2018-12-18 17:55:39 | 0 | 2018-12-18 17:55:39 |
// +---------------------+----------+---------------------+
// while:
// mysql> select sysdate(), sleep(5), sysdate();
// +---------------------+----------+---------------------+
// | sysdate() | sleep(5) | sysdate() |
// +---------------------+----------+---------------------+
// | 2018-12-18 17:57:38 | 0 | 2018-12-18 17:57:43 |
// +---------------------+----------+---------------------+
// for safety considerations, treat them all as mutable.
ast.Now: {},
ast.CurrentTimestamp: {},
ast.UTCTime: {},
ast.Curtime: {},
ast.CurrentTime: {},
ast.UTCTimestamp: {},
ast.UnixTimestamp: {},
ast.Sysdate: {},
ast.Curdate: {},
ast.CurrentDate: {},
ast.UTCDate: {},
ast.Rand: {},
ast.RandomBytes: {},
ast.UUID: {},
ast.UUIDShort: {},
ast.Sleep: {},
ast.SetVar: {},
ast.GetVar: {},
ast.AnyValue: {},
}
// Some functions do NOT have correct implementations, but may have noop ones (e.g., regardless of input, always return 1).
// If apps really need these "funcs" to run, we offer a sys var (tidb_enable_noop_functions) to enable the noop usage.
var noopFuncs = map[string]struct{}{}
// booleanFunctions stores boolean functions
var booleanFunctions = map[string]struct{}{
ast.UnaryNot: {},
ast.EQ: {},
ast.NE: {},
ast.NullEQ: {},
ast.LT: {},
ast.LE: {},
ast.GT: {},
ast.GE: {},
ast.In: {},
ast.LogicAnd: {},
ast.LogicOr: {},
ast.LogicXor: {},
ast.IsTruthWithNull: {},
ast.IsTruthWithoutNull: {},
ast.IsFalsity: {},
ast.IsNull: {},
ast.Like: {},
ast.Regexp: {},
ast.IsIPv4: {},
ast.IsIPv4Compat: {},
ast.IsIPv4Mapped: {},
ast.IsIPv6: {},
ast.JSONValid: {},
ast.RegexpLike: {},
}
| pkg/expression/function_traits.go | 1 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.14961543679237366,
0.008495328947901726,
0.0001657320826780051,
0.00017059675883501768,
0.028029289096593857
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"github.com/pingcap/tidb/pkg/parser/ast\"\n",
"\t\"github.com/pingcap/tidb/pkg/parser/opcode\"\n",
")\n",
"\n",
"// UnCacheableFunctions stores functions which can not be cached to plan cache.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 19
} | set tidb_cost_model_version=1;
set @@sql_mode='STRICT_TRANS_TABLES';
drop table if exists t1, t2, t3;
create table t1 (c1 int primary key, c2 int, c3 int, index c2 (c2));
load stats 's/explain_easy_stats_t1.json';
create table t2 (c1 int unique, c2 int);
load stats 's/explain_easy_stats_t2.json';
create table t3 (a bigint, b bigint, c bigint, d bigint);
load stats 's/explain_easy_stats_t3.json';
create table index_prune(a bigint(20) NOT NULL, b bigint(20) NOT NULL, c tinyint(4) NOT NULL, primary key(a, b), index idx_b_c_a(b, c, a));
load stats 's/explain_easy_stats_index_prune.json';
set @@session.tidb_opt_agg_push_down = 1;
set @@session.tidb_opt_insubq_to_join_and_agg=1;
set @@session.tidb_hashagg_partial_concurrency = 1;
set @@session.tidb_hashagg_final_concurrency = 1;
explain format = 'brief' select * from t3 where exists (select s.a from t3 s having sum(s.a) = t3.a );
id estRows task access object operator info
HashJoin 1600.00 root semi join, equal:[eq(Column#13, Column#11)]
├─StreamAgg(Build) 1.00 root funcs:sum(Column#16)->Column#11
│ └─TableReader 1.00 root data:StreamAgg
│ └─StreamAgg 1.00 cop[tikv] funcs:sum(explain_easy_stats.t3.a)->Column#16
│ └─TableFullScan 2000.00 cop[tikv] table:s keep order:false
└─Projection(Probe) 2000.00 root explain_easy_stats.t3.a, explain_easy_stats.t3.b, explain_easy_stats.t3.c, explain_easy_stats.t3.d, cast(explain_easy_stats.t3.a, decimal(20,0) BINARY)->Column#13
└─TableReader 2000.00 root data:TableFullScan
└─TableFullScan 2000.00 cop[tikv] table:t3 keep order:false
explain format = 'brief' select * from t1;
id estRows task access object operator info
TableReader 1999.00 root data:TableFullScan
└─TableFullScan 1999.00 cop[tikv] table:t1 keep order:false
explain format = 'brief' select * from t1 order by c2;
id estRows task access object operator info
IndexLookUp 1999.00 root
├─IndexFullScan(Build) 1999.00 cop[tikv] table:t1, index:c2(c2) keep order:true
└─TableRowIDScan(Probe) 1999.00 cop[tikv] table:t1 keep order:false
explain format = 'brief' select * from t2 order by c2;
id estRows task access object operator info
Sort 1985.00 root explain_easy_stats.t2.c2
└─TableReader 1985.00 root data:TableFullScan
└─TableFullScan 1985.00 cop[tikv] table:t2 keep order:false
explain format = 'brief' select * from t1 where t1.c1 > 0;
id estRows task access object operator info
TableReader 1999.00 root data:TableRangeScan
└─TableRangeScan 1999.00 cop[tikv] table:t1 range:(0,+inf], keep order:false
explain format = 'brief' select t1.c1, t1.c2 from t1 where t1.c2 = 1;
id estRows task access object operator info
IndexReader 0.00 root index:IndexRangeScan
└─IndexRangeScan 0.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false
explain format = 'brief' select * from t1 left join t2 on t1.c2 = t2.c1 where t1.c1 > 1;
id estRows task access object operator info
HashJoin 2481.25 root left outer join, equal:[eq(explain_easy_stats.t1.c2, explain_easy_stats.t2.c1)]
├─TableReader(Build) 1985.00 root data:Selection
│ └─Selection 1985.00 cop[tikv] not(isnull(explain_easy_stats.t2.c1))
│ └─TableFullScan 1985.00 cop[tikv] table:t2 keep order:false
└─TableReader(Probe) 1998.00 root data:TableRangeScan
└─TableRangeScan 1998.00 cop[tikv] table:t1 range:(1,+inf], keep order:false
explain format = 'brief' update t1 set t1.c2 = 2 where t1.c1 = 1;
id estRows task access object operator info
Update N/A root N/A
└─Point_Get 1.00 root table:t1 handle:1
explain format = 'brief' delete from t1 where t1.c2 = 1;
id estRows task access object operator info
Delete N/A root N/A
└─IndexLookUp 0.00 root
├─IndexRangeScan(Build) 0.00 cop[tikv] table:t1, index:c2(c2) range:[1,1], keep order:false
└─TableRowIDScan(Probe) 0.00 cop[tikv] table:t1 keep order:false
explain format = 'brief' select count(b.c2) from t1 a, t2 b where a.c1 = b.c2 group by a.c1;
id estRows task access object operator info
Projection 1985.00 root Column#7
└─HashJoin 1985.00 root inner join, equal:[eq(explain_easy_stats.t1.c1, explain_easy_stats.t2.c2)]
├─HashAgg(Build) 1985.00 root group by:explain_easy_stats.t2.c2, funcs:count(explain_easy_stats.t2.c2)->Column#8, funcs:firstrow(explain_easy_stats.t2.c2)->explain_easy_stats.t2.c2
│ └─TableReader 1985.00 root data:Selection
│ └─Selection 1985.00 cop[tikv] not(isnull(explain_easy_stats.t2.c2))
│ └─TableFullScan 1985.00 cop[tikv] table:b keep order:false
└─TableReader(Probe) 1999.00 root data:TableFullScan
└─TableFullScan 1999.00 cop[tikv] table:a keep order:false
explain format = 'brief' select * from t2 order by t2.c2 limit 0, 1;
id estRows task access object operator info
TopN 1.00 root explain_easy_stats.t2.c2, offset:0, count:1
└─TableReader 1.00 root data:TopN
└─TopN 1.00 cop[tikv] explain_easy_stats.t2.c2, offset:0, count:1
└─TableFullScan 1985.00 cop[tikv] table:t2 keep order:false
explain format = 'brief' select * from t1 where c1 > 1 and c2 = 1 and c3 < 1;
id estRows task access object operator info
IndexLookUp 0.00 root
├─IndexRangeScan(Build) 0.00 cop[tikv] table:t1, index:c2(c2) range:(1 1,1 +inf], keep order:false
└─Selection(Probe) 0.00 cop[tikv] lt(explain_easy_stats.t1.c3, 1)
└─TableRowIDScan 0.00 cop[tikv] table:t1 keep order:false
explain format = 'brief' select * from t1 where c1 = 1 and c2 > 1;
id estRows task access object operator info
Selection 0.50 root gt(explain_easy_stats.t1.c2, 1)
└─Point_Get 1.00 root table:t1 handle:1
explain format = 'brief' select c1 from t1 where c1 in (select c2 from t2);
id estRows task access object operator info
HashJoin 1985.00 root inner join, equal:[eq(explain_easy_stats.t1.c1, explain_easy_stats.t2.c2)]
├─HashAgg(Build) 1985.00 root group by:explain_easy_stats.t2.c2, funcs:firstrow(explain_easy_stats.t2.c2)->explain_easy_stats.t2.c2
│ └─TableReader 1985.00 root data:Selection
│ └─Selection 1985.00 cop[tikv] not(isnull(explain_easy_stats.t2.c2))
│ └─TableFullScan 1985.00 cop[tikv] table:t2 keep order:false
└─TableReader(Probe) 1999.00 root data:TableFullScan
└─TableFullScan 1999.00 cop[tikv] table:t1 keep order:false
explain format = 'brief' select * from information_schema.columns;
id estRows task access object operator info
MemTableScan 10000.00 root table:COLUMNS
explain format = 'brief' select c2 = (select c2 from t2 where t1.c1 = t2.c1 order by c1 limit 1) from t1;
id estRows task access object operator info
Projection 1999.00 root eq(explain_easy_stats.t1.c2, explain_easy_stats.t2.c2)->Column#11
└─Apply 1999.00 root CARTESIAN left outer join
├─TableReader(Build) 1999.00 root data:TableFullScan
│ └─TableFullScan 1999.00 cop[tikv] table:t1 keep order:false
└─TopN(Probe) 1999.00 root explain_easy_stats.t2.c1, offset:0, count:1
└─IndexLookUp 1999.00 root
├─TopN(Build) 1999.00 cop[tikv] explain_easy_stats.t2.c1, offset:0, count:1
│ └─IndexRangeScan 4960.02 cop[tikv] table:t2, index:c1(c1) range: decided by [eq(explain_easy_stats.t1.c1, explain_easy_stats.t2.c1)], keep order:false
└─TableRowIDScan(Probe) 1999.00 cop[tikv] table:t2 keep order:false
explain format = 'brief' select * from t1 order by c1 desc limit 1;
id estRows task access object operator info
Limit 1.00 root offset:0, count:1
└─TableReader 1.00 root data:Limit
└─Limit 1.00 cop[tikv] offset:0, count:1
└─TableFullScan 1.00 cop[tikv] table:t1 keep order:true, desc
set @@session.tidb_opt_insubq_to_join_and_agg=0;
explain format = 'brief' select 1 in (select c2 from t2) from t1;
id estRows task access object operator info
HashJoin 1999.00 root CARTESIAN left outer semi join, other cond:eq(1, explain_easy_stats.t2.c2)
├─TableReader(Build) 1985.00 root data:TableFullScan
│ └─TableFullScan 1985.00 cop[tikv] table:t2 keep order:false
└─TableReader(Probe) 1999.00 root data:TableFullScan
└─TableFullScan 1999.00 cop[tikv] table:t1 keep order:false
explain format="dot" select 1 in (select c2 from t2) from t1;
dot contents
digraph HashJoin_8 {
subgraph cluster8{
node [style=filled, color=lightgrey]
color=black
label = "root"
"HashJoin_8" -> "TableReader_10"
"HashJoin_8" -> "TableReader_14"
}
subgraph cluster9{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_9"
}
subgraph cluster13{
node [style=filled, color=lightgrey]
color=black
label = "cop"
"TableFullScan_13"
}
"TableReader_10" -> "TableFullScan_9"
"TableReader_14" -> "TableFullScan_13"
}
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1;
id estRows task access object operator info
Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 0;
id estRows task access object operator info
TableDual 0.00 root rows:0
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1, 1;
id estRows task access object operator info
Limit 1.00 root offset:1, count:1
└─Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 1, 0;
id estRows task access object operator info
Limit 0.00 root offset:1, count:0
└─Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 LIMIT 0, 1;
id estRows task access object operator info
Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 ORDER BY a;
id estRows task access object operator info
Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 GROUP BY b;
id estRows task access object operator info
Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
explain format = 'brief' select * from index_prune WHERE a = 1010010404050976781 AND b = 26467085526790 GROUP BY b ORDER BY a limit 1;
id estRows task access object operator info
TopN 1.00 root explain_easy_stats.index_prune.a, offset:0, count:1
└─StreamAgg 1.00 root group by:explain_easy_stats.index_prune.b, funcs:firstrow(explain_easy_stats.index_prune.a)->explain_easy_stats.index_prune.a, funcs:firstrow(explain_easy_stats.index_prune.b)->explain_easy_stats.index_prune.b, funcs:firstrow(explain_easy_stats.index_prune.c)->explain_easy_stats.index_prune.c
└─Point_Get 1.00 root table:index_prune, index:PRIMARY(a, b)
drop table if exists t1, t2, t3, index_prune;
set @@session.tidb_opt_insubq_to_join_and_agg=1;
drop table if exists tbl;
create table tbl(column1 int, column2 int, index idx(column1, column2));
load stats 's/explain_easy_stats_tbl_dnf.json';
explain format = 'brief' select * from tbl where (column1=0 and column2=1) or (column1=1 and column2=3) or (column1=2 and column2=5);
id estRows task access object operator info
IndexReader 3.00 root index:IndexRangeScan
└─IndexRangeScan 3.00 cop[tikv] table:tbl, index:idx(column1, column2) range:[0 1,0 1], [1 3,1 3], [2 5,2 5], keep order:false
set @@sql_mode=default;
| tests/integrationtest/r/explain_easy_stats.result | 0 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.000174104847246781,
0.0001690883655101061,
0.00016462312487419695,
0.00016859130118973553,
0.0000027779208267020294
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"github.com/pingcap/tidb/pkg/parser/ast\"\n",
"\t\"github.com/pingcap/tidb/pkg/parser/opcode\"\n",
")\n",
"\n",
"// UnCacheableFunctions stores functions which can not be cached to plan cache.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 19
} | #!/bin/sh
#
# Copyright 2022 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
check_cluster_version 5 4 0 'new collation' || { echo 'TiDB does not support new collation! skipping test'; exit 0; }
set -eu
cur=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
. $cur/../_utils/run_services
# restart cluster with new collation enabled
start_services --tidb-cfg $cur/tidb-new-collation.toml
DB="$TEST_NAME"
run_sql "CREATE DATABASE $DB;"
run_sql "CREATE TABLE $DB.测试 ( \
a char(20) DEFAULT NULL, \
b tinyblob, \
c binary(100) DEFAULT NULL, \
d json DEFAULT NULL, \
e timestamp NULL DEFAULT NULL, \
f set('a一','b二','c三','d四') DEFAULT NULL, \
g text, \
h enum('a一','b二','c三','d四') DEFAULT 'c三' \
) ENGINE=InnoDB DEFAULT CHARSET=gbk COLLATE=gbk_chinese_ci;"
run_sql "INSERT INTO $DB.测试 VALUES ('你好', '你好', '你好', '{\"测试\": \"你好\"}', '2018-10-13', 1, '你好', 'a一');"
run_sql "INSERT INTO $DB.测试 VALUES ('你好123', '你好', '你好', '{\"测试\": \"你好\"}', '2018-10-13', 1, '你好', 'a一');"
run_sql "CREATE TABLE $DB.t ( \
YCSB_KEY varchar(64) NOT NULL, \
FIELD0 varchar(1) DEFAULT NULL, \
PRIMARY KEY (YCSB_KEY) \
) ENGINE=InnoDB DEFAULT CHARSET=gbk;"
run_sql "INSERT INTO $DB.t VALUES (\"测试\", \"你\");"
run_sql "SET NAMES gbk; INSERT INTO $DB.t VALUES (\"测试\", \"a\"); SET NAMES default;"
# backup db
echo "backup start..."
run_br --pd $PD_ADDR backup db --db "$DB" -s "local://$TEST_DIR/$DB"
run_sql "DROP DATABASE $DB;"
# restore db
echo "restore start..."
run_br restore db --db $DB -s "local://$TEST_DIR/$DB" --pd $PD_ADDR
table_count=$(run_sql "use $DB; show tables;" | grep "Tables_in" | wc -l)
if [ "$table_count" -ne "2" ];then
echo "TEST: [$TEST_NAME] failed!"
exit 1
fi
run_sql "SELECT * from $DB.测试;"
check_contains "{\"测试\": \"你好\"}"
check_contains "你好123"
run_sql "SELECT hex(a) from $DB.测试;"
check_contains "C4E3BAC3"
run_sql "SELECT * from $DB.t;"
check_contains "你"
check_contains "测试"
check_contains "娴嬭瘯"
# Test BR DDL query string
echo "testing DDL query..."
run_curl https://$TIDB_STATUS_ADDR/ddl/history | grep -E '/\*from\(br\)\*/CREATE TABLE.*CHARSET=gbk'
run_curl https://$TIDB_STATUS_ADDR/ddl/history | grep -E '/\*from\(br\)\*/CREATE DATABASE'
run_sql "DROP DATABASE $DB;"
| br/tests/br_charset_gbk/run.sh | 0 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.0001761939056450501,
0.00016892494750209153,
0.0001627761375857517,
0.0001695514947641641,
0.000003603118102546432
] |
{
"id": 0,
"code_window": [
"import (\n",
"\t\"github.com/pingcap/tidb/pkg/parser/ast\"\n",
"\t\"github.com/pingcap/tidb/pkg/parser/opcode\"\n",
")\n",
"\n",
"// UnCacheableFunctions stores functions which can not be cached to plan cache.\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"github.com/pingcap/tidb/pkg/sessionctx\"\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 19
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/pkg/executor/internal/exec"
"github.com/pingcap/tidb/pkg/expression"
"github.com/pingcap/tidb/pkg/sessionctx"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
"github.com/pingcap/tidb/pkg/util"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/codec"
"github.com/pingcap/tidb/pkg/util/cteutil"
"github.com/pingcap/tidb/pkg/util/dbterror/exeerrors"
"github.com/pingcap/tidb/pkg/util/disk"
"github.com/pingcap/tidb/pkg/util/memory"
)
var _ exec.Executor = &CTEExec{}
// CTEExec implements CTE.
// The following diagram describes how CTEExec works.
//
// `iterInTbl` is shared by `CTEExec` and `CTETableReaderExec`.
// `CTETableReaderExec` reads data from `iterInTbl`,
// and its output will be stored in `iterOutTbl` by `CTEExec`.
//
// When an iteration ends, `CTEExec` will move all data from `iterOutTbl` into `iterInTbl`,
// which will be the input for the new iteration.
// At the end of each iteration, data in `iterOutTbl` will also be added into `resTbl`.
// `resTbl` stores the data of all iterations.
/*
+----------+
write |iterOutTbl|
CTEExec ------------------->| |
| +----+-----+
------------- | write
| | v
other op other op +----------+
(seed) (recursive) | resTbl |
^ | |
| +----------+
CTETableReaderExec
^
| read +----------+
+---------------+iterInTbl |
| |
+----------+
*/
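// An illustrative query (an assumed example, not from the original source)
// that exercises this flow: the seed part fills iterInTbl, then the recursive
// part is re-run against it until an iteration produces no new rows:
//
//	WITH RECURSIVE cte(n) AS (
//	    SELECT 1                             -- seed part
//	    UNION ALL
//	    SELECT n + 1 FROM cte WHERE n < 5    -- recursive part
//	)
//	SELECT * FROM cte;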
type CTEExec struct {
exec.BaseExecutor
chkIdx int
producer *cteProducer
// limit in recursive CTE.
cursor uint64
meetFirstBatch bool
}
// Open implements the Executor interface.
func (e *CTEExec) Open(ctx context.Context) (err error) {
e.reset()
if err := e.BaseExecutor.Open(ctx); err != nil {
return err
}
e.producer.resTbl.Lock()
defer e.producer.resTbl.Unlock()
if e.producer.checkAndUpdateCorColHashCode() {
e.producer.reset()
if err = e.producer.reopenTbls(); err != nil {
return err
}
}
if !e.producer.opened {
if err = e.producer.openProducer(ctx, e); err != nil {
return err
}
}
return nil
}
// Next implements the Executor interface.
func (e *CTEExec) Next(ctx context.Context, req *chunk.Chunk) (err error) {
e.producer.resTbl.Lock()
defer e.producer.resTbl.Unlock()
if !e.producer.resTbl.Done() {
if err = e.producer.produce(ctx, e); err != nil {
return err
}
}
return e.producer.getChunk(e, req)
}
// Close implements the Executor interface.
func (e *CTEExec) Close() (err error) {
func() {
e.producer.resTbl.Lock()
defer e.producer.resTbl.Unlock()
if !e.producer.closed {
failpoint.Inject("mock_cte_exec_panic_avoid_deadlock", func(v failpoint.Value) {
ok := v.(bool)
if ok {
// mock an oom panic, returning ErrMemoryExceedForQuery for error identification in recovery work.
panic(exeerrors.ErrMemoryExceedForQuery)
}
})
// closeProducer() only closes seedExec and recursiveExec; it will not touch resTbl.
// It means you can still read resTbl after calling closeProducer().
// You can even call all three functions (openProducer/produce/closeProducer) in CTEExec.Next().
// Separating these three function calls is only to follow the abstraction of the volcano model.
err = e.producer.closeProducer()
}
}()
if err != nil {
return err
}
return e.BaseExecutor.Close()
}
func (e *CTEExec) reset() {
e.chkIdx = 0
e.cursor = 0
e.meetFirstBatch = false
}
type cteProducer struct {
opened bool
produced bool
closed bool
ctx sessionctx.Context
seedExec exec.Executor
recursiveExec exec.Executor
// `resTbl` and `iterInTbl` are shared by all CTEExec instances which refer to the same CTE.
// `iterInTbl` is also shared by CTETableReaderExec.
resTbl cteutil.Storage
iterInTbl cteutil.Storage
iterOutTbl cteutil.Storage
hashTbl baseHashTable
// UNION ALL or UNION DISTINCT.
isDistinct bool
curIter int
hCtx *hashContext
sel []int
// Limit related info.
hasLimit bool
limitBeg uint64
limitEnd uint64
memTracker *memory.Tracker
diskTracker *disk.Tracker
// Correlated Column.
corCols []*expression.CorrelatedColumn
corColHashCodes [][]byte
}
func (p *cteProducer) openProducer(ctx context.Context, cteExec *CTEExec) (err error) {
if p.seedExec == nil {
return errors.New("seedExec for CTEExec is nil")
}
if err = exec.Open(ctx, p.seedExec); err != nil {
return err
}
if p.memTracker != nil {
p.memTracker.Reset()
} else {
p.memTracker = memory.NewTracker(cteExec.ID(), -1)
}
p.diskTracker = disk.NewTracker(cteExec.ID(), -1)
p.memTracker.AttachTo(p.ctx.GetSessionVars().StmtCtx.MemTracker)
p.diskTracker.AttachTo(p.ctx.GetSessionVars().StmtCtx.DiskTracker)
if p.recursiveExec != nil {
if err = exec.Open(ctx, p.recursiveExec); err != nil {
return err
}
// For non-recursive CTE, the result will be put into resTbl directly.
// So no need to build iterOutTbl.
// Construct iterOutTbl in Open() instead of buildCTE(), because its destruct is in Close().
recursiveTypes := p.recursiveExec.RetFieldTypes()
p.iterOutTbl = cteutil.NewStorageRowContainer(recursiveTypes, cteExec.MaxChunkSize())
if err = p.iterOutTbl.OpenAndRef(); err != nil {
return err
}
}
if p.isDistinct {
p.hashTbl = newConcurrentMapHashTable()
p.hCtx = &hashContext{
allTypes: cteExec.RetFieldTypes(),
}
// We use all columns to compute hash.
p.hCtx.keyColIdx = make([]int, len(p.hCtx.allTypes))
for i := range p.hCtx.keyColIdx {
p.hCtx.keyColIdx[i] = i
}
}
p.opened = true
return nil
}
func (p *cteProducer) closeProducer() (err error) {
if err = exec.Close(p.seedExec); err != nil {
return err
}
if p.recursiveExec != nil {
if err = exec.Close(p.recursiveExec); err != nil {
return err
}
// `iterInTbl` and `resTbl` are shared by multiple operators,
// so they will be closed when the SQL statement finishes.
if p.iterOutTbl != nil {
if err = p.iterOutTbl.DerefAndClose(); err != nil {
return err
}
}
}
p.closed = true
return nil
}
func (p *cteProducer) getChunk(cteExec *CTEExec, req *chunk.Chunk) (err error) {
req.Reset()
if p.hasLimit {
return p.nextChunkLimit(cteExec, req)
}
if cteExec.chkIdx < p.resTbl.NumChunks() {
res, err := p.resTbl.GetChunk(cteExec.chkIdx)
if err != nil {
return err
}
// Need to copy the chunk to make sure the upper operator will not change the chunk in resTbl.
// Also, we skip copying rows that are not selected, because some operators like Projection
// don't support swapping columns if chunk.sel is not nil.
req.SwapColumns(res.CopyConstructSel())
cteExec.chkIdx++
}
return nil
}
func (p *cteProducer) nextChunkLimit(cteExec *CTEExec, req *chunk.Chunk) error {
if !cteExec.meetFirstBatch {
for cteExec.chkIdx < p.resTbl.NumChunks() {
res, err := p.resTbl.GetChunk(cteExec.chkIdx)
if err != nil {
return err
}
cteExec.chkIdx++
numRows := uint64(res.NumRows())
if newCursor := cteExec.cursor + numRows; newCursor >= p.limitBeg {
cteExec.meetFirstBatch = true
begInChk, endInChk := p.limitBeg-cteExec.cursor, numRows
if newCursor > p.limitEnd {
endInChk = p.limitEnd - cteExec.cursor
}
cteExec.cursor += endInChk
if begInChk == endInChk {
break
}
tmpChk := res.CopyConstructSel()
req.Append(tmpChk, int(begInChk), int(endInChk))
return nil
}
cteExec.cursor += numRows
}
}
if cteExec.chkIdx < p.resTbl.NumChunks() && cteExec.cursor < p.limitEnd {
res, err := p.resTbl.GetChunk(cteExec.chkIdx)
if err != nil {
return err
}
cteExec.chkIdx++
numRows := uint64(res.NumRows())
if cteExec.cursor+numRows > p.limitEnd {
numRows = p.limitEnd - cteExec.cursor
req.Append(res.CopyConstructSel(), 0, int(numRows))
} else {
req.SwapColumns(res.CopyConstructSel())
}
cteExec.cursor += numRows
}
return nil
}
func (p *cteProducer) produce(ctx context.Context, cteExec *CTEExec) (err error) {
if p.resTbl.Error() != nil {
return p.resTbl.Error()
}
resAction := setupCTEStorageTracker(p.resTbl, cteExec.Ctx(), p.memTracker, p.diskTracker)
iterInAction := setupCTEStorageTracker(p.iterInTbl, cteExec.Ctx(), p.memTracker, p.diskTracker)
var iterOutAction *chunk.SpillDiskAction
if p.iterOutTbl != nil {
iterOutAction = setupCTEStorageTracker(p.iterOutTbl, cteExec.Ctx(), p.memTracker, p.diskTracker)
}
failpoint.Inject("testCTEStorageSpill", func(val failpoint.Value) {
if val.(bool) && variable.EnableTmpStorageOnOOM.Load() {
defer resAction.WaitForTest()
defer iterInAction.WaitForTest()
if iterOutAction != nil {
defer iterOutAction.WaitForTest()
}
}
})
if err = p.computeSeedPart(ctx); err != nil {
p.resTbl.SetError(err)
return err
}
if err = p.computeRecursivePart(ctx); err != nil {
p.resTbl.SetError(err)
return err
}
p.resTbl.SetDone()
return nil
}
func (p *cteProducer) computeSeedPart(ctx context.Context) (err error) {
defer func() {
if r := recover(); r != nil && err == nil {
err = util.GetRecoverError(r)
}
}()
failpoint.Inject("testCTESeedPanic", nil)
p.curIter = 0
p.iterInTbl.SetIter(p.curIter)
chks := make([]*chunk.Chunk, 0, 10)
for {
if p.limitDone(p.iterInTbl) {
break
}
chk := exec.TryNewCacheChunk(p.seedExec)
if err = exec.Next(ctx, p.seedExec, chk); err != nil {
return
}
if chk.NumRows() == 0 {
break
}
if chk, err = p.tryDedupAndAdd(chk, p.iterInTbl, p.hashTbl); err != nil {
return
}
chks = append(chks, chk)
}
// Initial resTbl is empty, so no need to deduplicate chk using resTbl.
// Just adding is ok.
for _, chk := range chks {
if err = p.resTbl.Add(chk); err != nil {
return
}
}
p.curIter++
p.iterInTbl.SetIter(p.curIter)
return
}
func (p *cteProducer) computeRecursivePart(ctx context.Context) (err error) {
defer func() {
if r := recover(); r != nil && err == nil {
err = util.GetRecoverError(r)
}
}()
failpoint.Inject("testCTERecursivePanic", nil)
if p.recursiveExec == nil || p.iterInTbl.NumChunks() == 0 {
return
}
if p.curIter > p.ctx.GetSessionVars().CTEMaxRecursionDepth {
return exeerrors.ErrCTEMaxRecursionDepth.GenWithStackByArgs(p.curIter)
}
if p.limitDone(p.resTbl) {
return
}
for {
chk := exec.TryNewCacheChunk(p.recursiveExec)
if err = exec.Next(ctx, p.recursiveExec, chk); err != nil {
return
}
if chk.NumRows() == 0 {
if err = p.setupTblsForNewIteration(); err != nil {
return
}
if p.limitDone(p.resTbl) {
break
}
if p.iterInTbl.NumChunks() == 0 {
break
}
// Next iteration begins. Need to use iterOutTbl as the input of the next iteration.
p.curIter++
p.iterInTbl.SetIter(p.curIter)
if p.curIter > p.ctx.GetSessionVars().CTEMaxRecursionDepth {
return exeerrors.ErrCTEMaxRecursionDepth.GenWithStackByArgs(p.curIter)
}
// Make sure iterInTbl is set up before Close/Open,
// because some executors will read iterInTbl in Open() (like IndexLookupJoin).
if err = exec.Close(p.recursiveExec); err != nil {
return
}
if err = exec.Open(ctx, p.recursiveExec); err != nil {
return
}
} else {
if err = p.iterOutTbl.Add(chk); err != nil {
return
}
}
}
return
}
func (p *cteProducer) setupTblsForNewIteration() (err error) {
num := p.iterOutTbl.NumChunks()
chks := make([]*chunk.Chunk, 0, num)
// Setup resTbl's data.
for i := 0; i < num; i++ {
chk, err := p.iterOutTbl.GetChunk(i)
if err != nil {
return err
}
// Data should be copied in UNION DISTINCT,
// because deduplicate() will change data in iterOutTbl,
// which will cause a panic when spilling data to disk concurrently.
if p.isDistinct {
chk = chk.CopyConstruct()
}
chk, err = p.tryDedupAndAdd(chk, p.resTbl, p.hashTbl)
if err != nil {
return err
}
chks = append(chks, chk)
}
// Setup new iteration data in iterInTbl.
if err = p.iterInTbl.Reopen(); err != nil {
return err
}
if p.isDistinct {
// Already deduplicated by resTbl, adding directly is ok.
for _, chk := range chks {
if err = p.iterInTbl.Add(chk); err != nil {
return err
}
}
} else {
if err = p.iterInTbl.SwapData(p.iterOutTbl); err != nil {
return err
}
}
// Clear data in iterOutTbl.
return p.iterOutTbl.Reopen()
}
func (p *cteProducer) reset() {
p.curIter = 0
p.hashTbl = nil
p.opened = false
p.produced = false
p.closed = false
}
func (p *cteProducer) reopenTbls() (err error) {
if p.isDistinct {
p.hashTbl = newConcurrentMapHashTable()
}
if err := p.resTbl.Reopen(); err != nil {
return err
}
return p.iterInTbl.Reopen()
}
// limitDone checks whether tbl already holds enough rows to satisfy the limit.
func (p *cteProducer) limitDone(tbl cteutil.Storage) bool {
return p.hasLimit && uint64(tbl.NumRows()) >= p.limitEnd
}
func setupCTEStorageTracker(tbl cteutil.Storage, ctx sessionctx.Context, parentMemTracker *memory.Tracker,
parentDiskTracker *disk.Tracker) (actionSpill *chunk.SpillDiskAction) {
memTracker := tbl.GetMemTracker()
memTracker.SetLabel(memory.LabelForCTEStorage)
memTracker.AttachTo(parentMemTracker)
diskTracker := tbl.GetDiskTracker()
diskTracker.SetLabel(memory.LabelForCTEStorage)
diskTracker.AttachTo(parentDiskTracker)
if variable.EnableTmpStorageOnOOM.Load() {
actionSpill = tbl.ActionSpill()
failpoint.Inject("testCTEStorageSpill", func(val failpoint.Value) {
if val.(bool) {
actionSpill = tbl.(*cteutil.StorageRC).ActionSpillForTest()
}
})
ctx.GetSessionVars().MemTracker.FallbackOldAndSetNewAction(actionSpill)
}
return actionSpill
}
func (p *cteProducer) tryDedupAndAdd(chk *chunk.Chunk,
storage cteutil.Storage,
hashTbl baseHashTable) (res *chunk.Chunk, err error) {
if p.isDistinct {
if chk, err = p.deduplicate(chk, storage, hashTbl); err != nil {
return nil, err
}
}
return chk, storage.Add(chk)
}
// Compute hash values in chk and put them in hCtx.hashVals.
// Use the returned sel to choose the computed hash values.
func (p *cteProducer) computeChunkHash(chk *chunk.Chunk) (sel []int, err error) {
numRows := chk.NumRows()
p.hCtx.initHash(numRows)
// Continue to reset to make sure all hashers are new.
for i := numRows; i < len(p.hCtx.hashVals); i++ {
p.hCtx.hashVals[i].Reset()
}
sel = chk.Sel()
var hashBitMap []bool
if sel != nil {
hashBitMap = make([]bool, chk.Capacity())
for _, val := range sel {
hashBitMap[val] = true
}
} else {
// The length of p.sel is initialized as MaxChunkSize, but the row count of the chunk may still exceed MaxChunkSize.
// So it needs to be handled here to make sure len(p.sel) == chk.NumRows().
if len(p.sel) < numRows {
tmpSel := make([]int, numRows-len(p.sel))
for i := 0; i < len(tmpSel); i++ {
tmpSel[i] = i + len(p.sel)
}
p.sel = append(p.sel, tmpSel...)
}
// All rows are selected; sel will be [0...numRows).
// e.sel is set up when building the executor.
sel = p.sel
}
for i := 0; i < chk.NumCols(); i++ {
if err = codec.HashChunkSelected(p.ctx.GetSessionVars().StmtCtx.TypeCtx(), p.hCtx.hashVals,
chk, p.hCtx.allTypes[i], i, p.hCtx.buf, p.hCtx.hasNull,
hashBitMap, false); err != nil {
return nil, err
}
}
return sel, nil
}
// Use hashTbl to deduplicate rows; unique rows will be added to hashTbl.
// Duplicated rows are only marked for removal by sel in Chunk, rather than actually deleted.
func (p *cteProducer) deduplicate(chk *chunk.Chunk,
storage cteutil.Storage,
hashTbl baseHashTable) (chkNoDup *chunk.Chunk, err error) {
numRows := chk.NumRows()
if numRows == 0 {
return chk, nil
}
// 1. Compute hash values for chunk.
chkHashTbl := newConcurrentMapHashTable()
selOri, err := p.computeChunkHash(chk)
if err != nil {
return nil, err
}
// 2. Filter rows duplicated in input chunk.
// This sel is for filtering rows duplicated in cur chk.
selChk := make([]int, 0, numRows)
for i := 0; i < numRows; i++ {
key := p.hCtx.hashVals[selOri[i]].Sum64()
row := chk.GetRow(i)
hasDup, err := p.checkHasDup(key, row, chk, storage, chkHashTbl)
if err != nil {
return nil, err
}
if hasDup {
continue
}
selChk = append(selChk, selOri[i])
rowPtr := chunk.RowPtr{ChkIdx: uint32(0), RowIdx: uint32(i)}
chkHashTbl.Put(key, rowPtr)
}
chk.SetSel(selChk)
chkIdx := storage.NumChunks()
// 3. Filter rows duplicated in RowContainer.
// This sel is for filtering rows duplicated in cteutil.Storage.
selStorage := make([]int, 0, len(selChk))
for i := 0; i < len(selChk); i++ {
key := p.hCtx.hashVals[selChk[i]].Sum64()
row := chk.GetRow(i)
hasDup, err := p.checkHasDup(key, row, nil, storage, hashTbl)
if err != nil {
return nil, err
}
if hasDup {
continue
}
rowIdx := len(selStorage)
selStorage = append(selStorage, selChk[i])
rowPtr := chunk.RowPtr{ChkIdx: uint32(chkIdx), RowIdx: uint32(rowIdx)}
hashTbl.Put(key, rowPtr)
}
chk.SetSel(selStorage)
return chk, nil
}
// Use the row's probe key to check if it already exists in chk or storage.
// We also need to compare the row's real encoded value to guard against hash collisions.
func (p *cteProducer) checkHasDup(probeKey uint64,
row chunk.Row,
curChk *chunk.Chunk,
storage cteutil.Storage,
hashTbl baseHashTable) (hasDup bool, err error) {
entry := hashTbl.Get(probeKey)
for ; entry != nil; entry = entry.next {
ptr := entry.ptr
var matchedRow chunk.Row
if curChk != nil {
matchedRow = curChk.GetRow(int(ptr.RowIdx))
} else {
matchedRow, err = storage.GetRow(ptr)
}
if err != nil {
return false, err
}
isEqual, err := codec.EqualChunkRow(p.ctx.GetSessionVars().StmtCtx.TypeCtx(),
row, p.hCtx.allTypes, p.hCtx.keyColIdx,
matchedRow, p.hCtx.allTypes, p.hCtx.keyColIdx)
if err != nil {
return false, err
}
if isEqual {
return true, nil
}
}
return false, nil
}
func getCorColHashCode(corCol *expression.CorrelatedColumn) (res []byte) {
return codec.HashCode(res, *corCol.Data)
}
// Return true if any correlated column has changed.
func (p *cteProducer) checkAndUpdateCorColHashCode() bool {
var changed bool
for i, corCol := range p.corCols {
newHashCode := getCorColHashCode(corCol)
if !bytes.Equal(newHashCode, p.corColHashCodes[i]) {
changed = true
p.corColHashCodes[i] = newHashCode
}
}
return changed
}
| pkg/executor/cte.go | 0 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.0003906286437995732,
0.00017702675540931523,
0.0001621617266209796,
0.00016995161422528327,
0.000035119071981171146
] |
{
"id": 1,
"code_window": [
"\tast.SetVar: {},\n",
"\tast.GetVar: {},\n",
"\tast.ReleaseAllLocks: {},\n",
"}\n",
"\n",
"// DeferredFunctions stores functions which are foldable but should be deferred as well when plan cache is enabled.\n",
"// Note that, these functions must be foldable at first place, i.e, they are not in `unFoldableFunctions`.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"// IsDeferredFunctions checks whether the function is in DeferredFunctions.\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 135
} | // Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prepare_test
import (
"context"
"fmt"
"math"
"math/rand"
"strconv"
"strings"
"testing"
"time"
"github.com/pingcap/tidb/pkg/errno"
"github.com/pingcap/tidb/pkg/executor"
"github.com/pingcap/tidb/pkg/expression"
"github.com/pingcap/tidb/pkg/infoschema"
"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/metrics"
"github.com/pingcap/tidb/pkg/parser"
"github.com/pingcap/tidb/pkg/parser/auth"
"github.com/pingcap/tidb/pkg/planner/core"
"github.com/pingcap/tidb/pkg/session"
sessiontypes "github.com/pingcap/tidb/pkg/session/types"
"github.com/pingcap/tidb/pkg/sessionctx/variable"
"github.com/pingcap/tidb/pkg/testkit"
"github.com/pingcap/tidb/pkg/util/hint"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
)
func TestPointGetPreparedPlan4PlanCache(t *testing.T) {
store := testkit.CreateMockStore(t)
tk1 := testkit.NewTestKit(t, store)
tk1.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk1.MustExec("drop database if exists ps_text")
defer tk1.MustExec("drop database if exists ps_text")
tk1.MustExec("create database ps_text")
tk1.MustExec("use ps_text")
tk1.MustExec(`create table t (a int, b int, c int,
primary key k_a(a),
unique key k_b(b))`)
tk1.MustExec("insert into t values (1, 1, 1)")
tk1.MustExec("insert into t values (2, 2, 2)")
tk1.MustExec("insert into t values (3, 3, 3)")
pspk1Id, _, _, err := tk1.Session().PrepareStmt("select * from t where a = ?")
require.NoError(t, err)
tk1.Session().GetSessionVars().PreparedStmts[pspk1Id].(*core.PlanCacheStmt).StmtCacheable = false
ctx := context.Background()
// first time plan generated
_, err = tk1.Session().ExecutePreparedStmt(ctx, pspk1Id, expression.Args2Expressions4Test(0))
require.NoError(t, err)
// using the generated plan but with different params
_, err = tk1.Session().ExecutePreparedStmt(ctx, pspk1Id, expression.Args2Expressions4Test(nil))
require.NoError(t, err)
}
func TestRandomFlushPlanCache(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk2 := testkit.NewTestKit(t, store)
var err error
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t1(id int, a int, b int, key(a))")
tk.MustExec("create table t2(id int, a int, b int, key(a))")
tk.MustExec("prepare stmt1 from 'SELECT * from t1,t2 where t1.id = t2.id';")
tk.MustExec("prepare stmt2 from 'SELECT * from t1';")
tk.MustExec("prepare stmt3 from 'SELECT * from t1 where id = 1';")
tk.MustExec("prepare stmt4 from 'SELECT * from t2';")
tk.MustExec("prepare stmt5 from 'SELECT * from t2 where id = 1';")
tk2.MustExec("use test")
tk2.MustExec("prepare stmt1 from 'SELECT * from t1,t2 where t1.id = t2.id';")
tk2.MustExec("prepare stmt2 from 'SELECT * from t1';")
tk2.MustExec("prepare stmt3 from 'SELECT * from t1 where id = 1';")
tk2.MustExec("prepare stmt4 from 'SELECT * from t2';")
tk2.MustExec("prepare stmt5 from 'SELECT * from t2 where id = 1';")
prepareNum := 5
execStmts := make([]string, 0, prepareNum)
for i := 1; i <= prepareNum; i++ {
execStmt := fmt.Sprintf("execute stmt%d", i)
execStmts = append(execStmts, execStmt)
}
for i := 0; i < 10; i++ {
// Warm up to make sure all the plans are in the cache.
for _, execStmt := range execStmts {
tk.MustExec(execStmt)
tk.MustExec(execStmt)
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
tk2.MustExec(execStmt)
tk2.MustExec(execStmt)
tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
for j := 0; j < 10; j++ {
session1PC, session2PC := "1", "1"
// randomly flush the plan cache
randNum := rand.Intn(10)
if randNum == 0 {
session1PC, session2PC = "0", "0"
if j%2 == 0 {
err = tk.ExecToErr("admin flush instance plan_cache;")
} else {
err = tk2.ExecToErr("admin flush instance plan_cache;")
}
require.NoError(t, err)
} else if randNum == 1 {
session1PC = "0"
err = tk.ExecToErr("admin flush session plan_cache;")
require.NoError(t, err)
} else if randNum == 2 {
session2PC = "0"
err = tk2.ExecToErr("admin flush session plan_cache;")
require.NoError(t, err)
}
for _, execStmt := range execStmts {
tk.MustExec(execStmt)
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows(session1PC))
tk2.MustExec(execStmt)
tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows(session2PC))
}
}
err = tk.ExecToErr("admin flush instance plan_cache;")
require.NoError(t, err)
}
err = tk.ExecToErr("admin flush global plan_cache;")
require.EqualError(t, err, "Do not support the 'admin flush global scope.'")
}
func TestPrepareCache(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int primary key, b int, c int, index idx1(b, a), index idx2(b))")
tk.MustExec("insert into t values(1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 1, 2)")
tk.MustExec(`prepare stmt1 from "select * from t use index(idx1) where a = ? and b = ?"`)
tk.MustExec(`prepare stmt2 from "select a, b from t use index(idx2) where b = ?"`)
tk.MustExec(`prepare stmt3 from "select * from t where a = ?"`)
tk.MustExec("set @a=1, @b=1")
// When executing a statement for the first time, we don't use the cache, so we need to execute it at least twice to test the cache.
tk.MustQuery("execute stmt1 using @a, @b").Check(testkit.Rows("1 1 1"))
tk.MustQuery("execute stmt1 using @a, @b").Check(testkit.Rows("1 1 1"))
tk.MustQuery("execute stmt2 using @b").Check(testkit.Rows("1 1", "6 1"))
tk.MustQuery("execute stmt2 using @b").Check(testkit.Rows("1 1", "6 1"))
tk.MustQuery("execute stmt3 using @a").Check(testkit.Rows("1 1 1"))
tk.MustQuery("execute stmt3 using @a").Check(testkit.Rows("1 1 1"))
tk.MustExec(`prepare stmt4 from "select * from t where a > ?"`)
tk.MustExec("set @a=3")
tk.MustQuery("execute stmt4 using @a").Check(testkit.Rows("4 4 4", "5 5 5", "6 1 2"))
tk.MustQuery("execute stmt4 using @a").Check(testkit.Rows("4 4 4", "5 5 5", "6 1 2"))
tk.MustExec(`prepare stmt5 from "select c from t order by c"`)
tk.MustQuery("execute stmt5").Check(testkit.Rows("1", "2", "2", "3", "4", "5"))
tk.MustQuery("execute stmt5").Check(testkit.Rows("1", "2", "2", "3", "4", "5"))
tk.MustExec(`prepare stmt6 from "select distinct a from t order by a"`)
tk.MustQuery("execute stmt6").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
tk.MustQuery("execute stmt6").Check(testkit.Rows("1", "2", "3", "4", "5", "6"))
// test privilege change
rootSe := tk.Session()
tk.MustExec("drop table if exists tp")
tk.MustExec(`create table tp(c1 int, c2 int, primary key (c1))`)
tk.MustExec(`insert into tp values(1, 1), (2, 2), (3, 3)`)
tk.MustExec(`create user 'u_tp'@'localhost'`)
tk.MustExec(`grant select on test.tp to u_tp@'localhost';`)
// user u_tp
userSess := newSession(t, store, "test")
require.NoError(t, userSess.Auth(&auth.UserIdentity{Username: "u_tp", Hostname: "localhost"}, nil, nil, nil))
mustExec(t, userSess, `prepare ps_stp_r from 'select * from tp where c1 > ?'`)
mustExec(t, userSess, `set @p2 = 2`)
tk.SetSession(userSess)
tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3"))
tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3"))
tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3"))
// root revoke
tk.SetSession(rootSe)
tk.MustExec(`revoke all on test.tp from 'u_tp'@'localhost';`)
// user u_tp
tk.SetSession(userSess)
_, err := tk.Exec(`execute ps_stp_r using @p2`)
require.Error(t, err)
// grant again
tk.SetSession(rootSe)
tk.MustExec(`grant select on test.tp to u_tp@'localhost';`)
// user u_tp
tk.SetSession(userSess)
tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3"))
tk.MustQuery(`execute ps_stp_r using @p2`).Check(testkit.Rows("3 3"))
// restore
tk.SetSession(rootSe)
tk.MustExec("drop table if exists tp")
tk.MustExec(`DROP USER 'u_tp'@'localhost';`)
}
// dtype: tinyint, unsigned, float, decimal, year
// rtype: null, valid, out-of-range, invalid, str, exists
func randValue(tk *testkit.TestKit, tbl, col, dtype, rtype string) string {
if rtype == "" {
rtypes := []string{"null", "valid", "out-of-range", "invalid", "str", "exists"}
rtype = rtypes[rand.Intn(len(rtypes))]
}
if rtype == "null" {
return "null"
}
if rtype == "exists" {
res := tk.MustQuery(fmt.Sprintf("select %v from %v limit 1", col, tbl)).Rows()[0][0].(string)
if res == "<nil>" {
res = "null"
}
return res
}
switch dtype {
case "tinyint":
switch rtype {
case "valid":
return fmt.Sprintf("%v", -128+rand.Intn(256))
case "out-of-range":
return fmt.Sprintf("%v", 128+rand.Intn(1024))
case "invalid":
return "'invalid-tinyint'"
case "str":
return fmt.Sprintf("'%v'", -128+rand.Intn(256))
}
case "unsigned":
switch rtype {
case "valid":
return fmt.Sprintf("%v", rand.Intn(4294967295))
case "out-of-range":
return fmt.Sprintf("-%v", rand.Intn(4294967295))
case "invalid":
return "'invalid-unsigned-int'"
case "str":
return fmt.Sprintf("'%v'", rand.Intn(4294967295))
}
case "float":
switch rtype {
case "valid":
return fmt.Sprintf("%v%.4fE%v", []string{"+", "-"}[rand.Intn(2)], rand.Float32(), rand.Intn(38))
case "out-of-range":
return fmt.Sprintf("%v%.4fE%v", []string{"+", "-"}[rand.Intn(2)], rand.Float32(), rand.Intn(100)+38)
case "invalid":
return "'invalid-float'"
case "str":
return fmt.Sprintf("'%v%.4fE%v'", []string{"+", "-"}[rand.Intn(2)], rand.Float32(), rand.Intn(38))
}
case "decimal": // (10,2)
switch rtype {
case "valid":
return fmt.Sprintf("%v%v.%v", []string{"+", "-"}[rand.Intn(2)], rand.Intn(99999999), rand.Intn(100))
case "out-of-range":
switch rand.Intn(2) {
case 0:
return fmt.Sprintf("%v%v.%v", []string{"+", "-"}[rand.Intn(2)], rand.Intn(99999999), rand.Intn(100000)+100000)
case 1:
return fmt.Sprintf("%v%v.%v", []string{"+", "-"}[rand.Intn(2)], rand.Intn(99999999)+99999999+1, rand.Intn(100))
}
case "invalid":
return "'invalid-decimal'"
case "str":
return fmt.Sprintf("'%v%v.%v'", []string{"+", "-"}[rand.Intn(2)], rand.Intn(99999999), rand.Intn(100))
}
case "year":
switch rtype {
case "valid":
return fmt.Sprintf("%v", 1901+rand.Intn(2155-1901))
case "out-of-range":
return fmt.Sprintf("%v", 2156+rand.Intn(2155-1901))
case "invalid":
return "'invalid-year'"
case "str":
return fmt.Sprintf("'%v'", 1901+rand.Intn(2155-1901))
}
}
return "'invalid-type-" + dtype + "'"
}
func TestPrepareCacheChangingParamType(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t_tinyint, t_unsigned, t_float, t_decimal, t_year`)
tk.MustExec(`create table t_tinyint (a tinyint, b tinyint, key(a))`)
tk.MustExec(`create table t_unsigned (a int unsigned, b int unsigned, key(a))`)
tk.MustExec(`create table t_float(a float, b float, key(a))`)
tk.MustExec(`create table t_decimal(a decimal(10,2), b decimal(10,2), key(a))`)
tk.MustExec(`create table t_year(a year, b year, key(a))`)
for _, dtype := range []string{"tinyint", "unsigned", "float", "decimal", "year"} {
tbl := "t_" + dtype
for i := 0; i < 10; i++ {
tk.MustExec(fmt.Sprintf("insert into %v values (%v, %v)", tbl, randValue(nil, "", "", dtype, "valid"), randValue(nil, "", "", dtype, "valid")))
}
tk.MustExec(fmt.Sprintf("insert into %v values (null, null)", tbl))
tk.MustExec(fmt.Sprintf("insert into %v values (%v, null)", tbl, randValue(nil, "", "", dtype, "valid")))
tk.MustExec(fmt.Sprintf("insert into %v values (null, %v)", tbl, randValue(nil, "", "", dtype, "valid")))
for round := 0; round < 10; round++ {
tk.MustExec(fmt.Sprintf(`prepare s1 from 'select * from %v where a=?'`, tbl))
tk.MustExec(fmt.Sprintf(`prepare s2 from 'select * from %v where b=?'`, tbl))
tk.MustExec(fmt.Sprintf(`prepare s3 from 'select * from %v where a in (?, ?, ?)'`, tbl))
tk.MustExec(fmt.Sprintf(`prepare s4 from 'select * from %v where b in (?, ?, ?)'`, tbl))
tk.MustExec(fmt.Sprintf(`prepare s5 from 'select * from %v where a>?'`, tbl))
tk.MustExec(fmt.Sprintf(`prepare s6 from 'select * from %v where b>?'`, tbl))
tk.MustExec(fmt.Sprintf(`prepare s7 from 'select * from %v where a>? and b>?'`, tbl))
for query := 0; query < 10; query++ {
a1, a2, a3 := randValue(tk, tbl, "a", dtype, ""), randValue(tk, tbl, "a", dtype, ""), randValue(tk, tbl, "a", dtype, "")
b1, b2, b3 := randValue(tk, tbl, "b", dtype, ""), randValue(tk, tbl, "b", dtype, ""), randValue(tk, tbl, "b", dtype, "")
tk.MustExec(fmt.Sprintf(`set @a1=%v,@a2=%v,@a3=%v`, a1, a2, a3))
tk.MustExec(fmt.Sprintf(`set @b1=%v,@b2=%v,@b3=%v`, b1, b2, b3))
compareResult := func(sql1, sql2 string) {
raw, err := tk.Exec(sql1)
if err != nil {
require.Error(t, tk.ExecToErr(sql2))
return
}
rs := tk.ResultSetToResult(raw, fmt.Sprintf("sql1:%s, sql2:%v", sql1, sql2))
rs.Sort().Check(tk.MustQuery(sql2).Sort().Rows())
}
compareResult(`execute s1 using @a1`, fmt.Sprintf(`select * from %v where a=%v`, tbl, a1))
compareResult(`execute s2 using @b1`, fmt.Sprintf(`select * from %v where b=%v`, tbl, b1))
compareResult(`execute s3 using @a1,@a2,@a3`, fmt.Sprintf(`select * from %v where a in (%v,%v,%v)`, tbl, a1, a2, a3))
compareResult(`execute s4 using @b1,@b2,@b3`, fmt.Sprintf(`select * from %v where b in (%v,%v,%v)`, tbl, b1, b2, b3))
compareResult(`execute s5 using @a1`, fmt.Sprintf(`select * from %v where a>%v`, tbl, a1))
compareResult(`execute s6 using @b1`, fmt.Sprintf(`select * from %v where b>%v`, tbl, b1))
compareResult(`execute s7 using @a1,@b1`, fmt.Sprintf(`select * from %v where a>%v and b>%v`, tbl, a1, b1))
}
}
}
}
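// TestPrepareCacheDeferredFunction verifies that now(3) in a cached prepared
// plan is re-evaluated on every execution: the two captured plan strings must
// both match the expected IndexReader range pattern yet differ from each other.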
func TestPrepareCacheDeferredFunction(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("create table t1 (id int PRIMARY KEY, c1 TIMESTAMP(3) NOT NULL DEFAULT '2019-01-14 10:43:20', KEY idx1 (c1))")
tk.MustExec("prepare sel1 from 'select id, c1 from t1 where c1 < now(3)'")
sql1 := "execute sel1"
expectedPattern := `IndexReader\(Index\(t1.idx1\)\[\[-inf,[0-9]{4}-(0[1-9]|1[0-2])-(0[1-9]|[1-2][0-9]|3[0-1]) (2[0-3]|[01][0-9]):[0-5][0-9]:[0-5][0-9].[0-9][0-9][0-9]\)\]\)`
var cnt [2]float64
var planStr [2]string
metrics.ResettablePlanCacheCounterFortTest = true
metrics.PlanCacheCounter.Reset()
counter := metrics.PlanCacheCounter.WithLabelValues("prepare")
ctx := context.TODO()
p := parser.New()
p.SetParserConfig(parser.ParserConfig{EnableWindowFunction: true, EnableStrictDoubleTypeCheck: true})
for i := 0; i < 2; i++ {
stmt, err := p.ParseOneStmt(sql1, "", "")
require.NoError(t, err)
is := tk.Session().GetInfoSchema().(infoschema.InfoSchema)
builder, _ := core.NewPlanBuilder().Init(tk.Session(), is, hint.NewQBHintHandler(nil))
p, err := builder.Build(ctx, stmt)
require.NoError(t, err)
execPlan, ok := p.(*core.Execute)
require.True(t, ok)
err = executor.ResetContextOfStmt(tk.Session(), stmt)
require.NoError(t, err)
plan, _, err := core.GetPlanFromSessionPlanCache(ctx, tk.Session(), false, is, execPlan.PrepStmt, execPlan.Params)
require.NoError(t, err)
planStr[i] = core.ToString(plan)
require.Regexpf(t, expectedPattern, planStr[i], "for %dth %s", i, sql1)
pb := &dto.Metric{}
err = counter.Write(pb)
require.NoError(t, err)
cnt[i] = pb.GetCounter().GetValue()
require.Equal(t, float64(i), cnt[i])
time.Sleep(time.Millisecond * 10)
}
require.Lessf(t, planStr[0], planStr[1], "plan 1: %v, plan 2: %v", planStr[0], planStr[1])
}
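// TestPrepareCacheNow checks that now(), current_timestamp(), utc_timestamp()
// and unix_timestamp() are each evaluated only once per execution, so the two
// calls inside one statement return identical values even across sleep(0.1).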
func TestPrepareCacheNow(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec("use test")
tk.MustExec(`prepare stmt1 from "select now(), current_timestamp(), utc_timestamp(), unix_timestamp(), sleep(0.1), now(), current_timestamp(), utc_timestamp(), unix_timestamp()"`)
// When a statement is executed for the first time, we don't use the cache, so we need to execute it at least twice to test the cache.
_ = tk.MustQuery("execute stmt1").Rows()
rs := tk.MustQuery("execute stmt1").Rows()
require.Equal(t, rs[0][5].(string), rs[0][0].(string))
require.Equal(t, rs[0][6].(string), rs[0][1].(string))
require.Equal(t, rs[0][7].(string), rs[0][2].(string))
require.Equal(t, rs[0][8].(string), rs[0][3].(string))
}
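// TestPrepareOverMaxPreparedStmtCount exercises the PreparedStmtGauge metric
// across prepare, deallocate, and session close, then verifies that exceeding
// @@global.max_prepared_stmt_count fails with ErrMaxPreparedStmtCountReached.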
func TestPrepareOverMaxPreparedStmtCount(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
// test prepare and deallocate.
prePrepared := readGaugeInt(metrics.PreparedStmtGauge)
tk.MustExec(`prepare stmt1 from "select 1"`)
onePrepared := readGaugeInt(metrics.PreparedStmtGauge)
require.Equal(t, onePrepared, prePrepared+1)
tk.MustExec(`deallocate prepare stmt1`)
deallocPrepared := readGaugeInt(metrics.PreparedStmtGauge)
require.Equal(t, deallocPrepared, prePrepared)
// test changing the global limit and ensure it takes effect in the test session.
tk.MustQuery("select @@max_prepared_stmt_count").Check(testkit.Rows("-1"))
tk.MustExec("set @@global.max_prepared_stmt_count = 2")
tk.MustQuery("select @@global.max_prepared_stmt_count").Check(testkit.Rows("2"))
// test that closing the session gives up all prepared stmts
tk.MustExec(`prepare stmt2 from "select 1"`)
prePrepared = readGaugeInt(metrics.PreparedStmtGauge)
tk.Session().Close()
drawPrepared := readGaugeInt(metrics.PreparedStmtGauge)
require.Equal(t, drawPrepared, prePrepared-1)
// test hitting the max limit.
tk.RefreshSession()
tk.MustQuery("select @@max_prepared_stmt_count").Check(testkit.Rows("2"))
for i := 1; ; i++ {
prePrepared = readGaugeInt(metrics.PreparedStmtGauge)
if prePrepared >= 2 {
tk.MustGetErrCode(`prepare stmt`+strconv.Itoa(i)+` from "select 1"`, errno.ErrMaxPreparedStmtCountReached)
break
}
tk.MustExec(`prepare stmt` + strconv.Itoa(i) + ` from "select 1"`)
}
}
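// readGaugeInt returns the current value of the prometheus gauge g as an int.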
// nolint:unused
func readGaugeInt(g prometheus.Gauge) int {
ch := make(chan prometheus.Metric, 1)
g.Collect(ch)
m := <-ch
mm := &dto.Metric{}
err := m.Write(mm)
if err != nil {
panic(err)
}
return int(mm.GetGauge().GetValue())
}
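// TestPrepareWithSnapshot ensures that statements prepared before setting
// @@tidb_snapshot read data as of the snapshot ts rather than the latest data.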
func TestPrepareWithSnapshot(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
safePointName := "tikv_gc_safe_point"
safePointValue := "20060102-15:04:05 -0700"
safePointComment := "All versions after safe point can be accessed. (DO NOT EDIT)"
updateSafePoint := fmt.Sprintf(`INSERT INTO mysql.tidb VALUES ('%[1]s', '%[2]s', '%[3]s')
ON DUPLICATE KEY
UPDATE variable_value = '%[2]s', comment = '%[3]s'`, safePointName, safePointValue, safePointComment)
tk.MustExec(updateSafePoint)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int primary key, v int)")
tk.MustExec("insert into t select 1, 2")
tk.MustExec("begin")
ts := tk.MustQuery("select @@tidb_current_ts").Rows()[0][0].(string)
tk.MustExec("commit")
tk.MustExec("update t set v = 3 where id = 1")
tk.MustExec("prepare s1 from 'select * from t where id = 1';")
tk.MustExec("prepare s2 from 'select * from t';")
tk.MustExec("set @@tidb_snapshot = " + ts)
tk.MustQuery("execute s1").Check(testkit.Rows("1 2"))
tk.MustQuery("execute s2").Check(testkit.Rows("1 2"))
}
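// TestPrepareCacheForPartition runs prepared statements against hash, range,
// list, and list-columns partitioned tables under both static and dynamic
// prune modes, covering PointGet, IndexRead, and TableScan access paths, and
// includes a regression case for issue #33031.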
func TestPrepareCacheForPartition(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec("use test")
for _, pruneMode := range []string{string(variable.Static), string(variable.Dynamic)} {
tk.MustExec("set @@tidb_partition_prune_mode = '" + pruneMode + "'")
// Test for PointGet and IndexRead.
tk.MustExec("drop table if exists t_index_read")
tk.MustExec("create table t_index_read (id int, k int, c varchar(10), primary key (id, k)) partition by hash(id+k) partitions 10")
tk.MustExec("insert into t_index_read values (1, 2, 'abc'), (3, 4, 'def'), (5, 6, 'xyz')")
tk.MustExec("prepare stmt1 from 'select c from t_index_read where id = ? and k = ?;'")
tk.MustExec("set @id=1, @k=2")
// When a statement is executed for the first time, we don't use the cache, so we need to execute it at least twice to test the cache.
tk.MustQuery("execute stmt1 using @id, @k").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt1 using @id, @k").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5, @k=6")
tk.MustQuery("execute stmt1 using @id, @k").Check(testkit.Rows("xyz"))
tk.MustExec("prepare stmt2 from 'select c from t_index_read where id = ? and k = ? and 1 = 1;'")
tk.MustExec("set @id=1, @k=2")
tk.MustQuery("execute stmt2 using @id, @k").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt2 using @id, @k").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5, @k=6")
tk.MustQuery("execute stmt2 using @id, @k").Check(testkit.Rows("xyz"))
// Test for TableScan.
tk.MustExec("drop table if exists t_table_read")
tk.MustExec("create table t_table_read (id int, k int, c varchar(10), primary key(id)) partition by hash(id) partitions 10")
tk.MustExec("insert into t_table_read values (1, 2, 'abc'), (3, 4, 'def'), (5, 6, 'xyz')")
tk.MustExec("prepare stmt3 from 'select c from t_index_read where id = ?;'")
tk.MustExec("set @id=1")
// When a statement is executed for the first time, we don't use the cache, so we need to execute it at least twice to test the cache.
tk.MustQuery("execute stmt3 using @id").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt3 using @id").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5")
tk.MustQuery("execute stmt3 using @id").Check(testkit.Rows("xyz"))
tk.MustExec("prepare stmt4 from 'select c from t_index_read where id = ? and k = ?'")
tk.MustExec("set @id=1, @k=2")
tk.MustQuery("execute stmt4 using @id, @k").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt4 using @id, @k").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5, @k=6")
tk.MustQuery("execute stmt4 using @id, @k").Check(testkit.Rows("xyz"))
// Queries on range-partitioned tables should not raise an error.
tk.MustExec("drop table if exists t_range_index")
tk.MustExec("create table t_range_index (id int, k int, c varchar(10), primary key(id)) partition by range(id) ( PARTITION p0 VALUES LESS THAN (4), PARTITION p1 VALUES LESS THAN (14),PARTITION p2 VALUES LESS THAN (20) )")
tk.MustExec("insert into t_range_index values (1, 2, 'abc'), (5, 4, 'def'), (13, 6, 'xyz'), (17, 6, 'hij')")
tk.MustExec("prepare stmt5 from 'select c from t_range_index where id = ?'")
tk.MustExec("set @id=1")
tk.MustQuery("execute stmt5 using @id").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt5 using @id").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5")
tk.MustQuery("execute stmt5 using @id").Check(testkit.Rows("def"))
tk.MustQuery("execute stmt5 using @id").Check(testkit.Rows("def"))
tk.MustExec("set @id=13")
tk.MustQuery("execute stmt5 using @id").Check(testkit.Rows("xyz"))
tk.MustExec("set @id=17")
tk.MustQuery("execute stmt5 using @id").Check(testkit.Rows("hij"))
tk.MustExec("drop table if exists t_range_table")
tk.MustExec("create table t_range_table (id int, k int, c varchar(10)) partition by range(id) ( PARTITION p0 VALUES LESS THAN (4), PARTITION p1 VALUES LESS THAN (14),PARTITION p2 VALUES LESS THAN (20) )")
tk.MustExec("insert into t_range_table values (1, 2, 'abc'), (5, 4, 'def'), (13, 6, 'xyz'), (17, 6, 'hij')")
tk.MustExec("prepare stmt6 from 'select c from t_range_table where id = ?'")
tk.MustExec("set @id=1")
tk.MustQuery("execute stmt6 using @id").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt6 using @id").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5")
tk.MustQuery("execute stmt6 using @id").Check(testkit.Rows("def"))
tk.MustQuery("execute stmt6 using @id").Check(testkit.Rows("def"))
tk.MustExec("set @id=13")
tk.MustQuery("execute stmt6 using @id").Check(testkit.Rows("xyz"))
tk.MustExec("set @id=17")
tk.MustQuery("execute stmt6 using @id").Check(testkit.Rows("hij"))
// Test for list partition
tk.MustExec("drop table if exists t_list_index")
tk.MustExec("create table t_list_index (id int, k int, c varchar(10), primary key(id)) partition by list (id*2-id) ( PARTITION p0 VALUES IN (1,2,3,4), PARTITION p1 VALUES IN (5,6,7,8),PARTITION p2 VALUES IN (9,10,11,12))")
tk.MustExec("insert into t_list_index values (1, 1, 'abc'), (5, 5, 'def'), (9, 9, 'xyz'), (12, 12, 'hij')")
tk.MustExec("prepare stmt7 from 'select c from t_list_index where id = ?'")
tk.MustExec("set @id=1")
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5")
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows("def"))
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows("def"))
tk.MustExec("set @id=9")
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows("xyz"))
tk.MustExec("set @id=12")
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows("hij"))
tk.MustExec("set @id=100")
tk.MustQuery("execute stmt7 using @id").Check(testkit.Rows())
// Test for list columns partition
tk.MustExec("drop table if exists t_list_index")
tk.MustExec("create table t_list_index (id int, k int, c varchar(10), primary key(id)) partition by list columns (id) ( PARTITION p0 VALUES IN (1,2,3,4), PARTITION p1 VALUES IN (5,6,7,8),PARTITION p2 VALUES IN (9,10,11,12))")
tk.MustExec("insert into t_list_index values (1, 1, 'abc'), (5, 5, 'def'), (9, 9, 'xyz'), (12, 12, 'hij')")
tk.MustExec("prepare stmt8 from 'select c from t_list_index where id = ?'")
tk.MustExec("set @id=1")
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows("abc"))
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows("abc"))
tk.MustExec("set @id=5")
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows("def"))
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows("def"))
tk.MustExec("set @id=9")
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows("xyz"))
tk.MustExec("set @id=12")
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows("hij"))
tk.MustExec("set @id=100")
tk.MustQuery("execute stmt8 using @id").Check(testkit.Rows())
// https://github.com/pingcap/tidb/issues/33031
tk.MustExec(`drop table if exists Issue33031`)
tk.MustExec(`CREATE TABLE Issue33031 (COL1 int(16) DEFAULT '29' COMMENT 'NUMERIC UNIQUE INDEX', COL2 bigint(20) DEFAULT NULL, UNIQUE KEY UK_COL1 (COL1)) PARTITION BY RANGE (COL1) (PARTITION P0 VALUES LESS THAN (0))`)
tk.MustExec(`insert into Issue33031 values(-5, 7)`)
tk.MustExec(`prepare stmt from 'select *,? from Issue33031 where col2 < ? and col1 in (?, ?)'`)
tk.MustExec(`set @a=111, @b=1, @c=2, @d=22`)
tk.MustQuery(`execute stmt using @d,@a,@b,@c`).Check(testkit.Rows())
tk.MustExec(`set @a=112, @b=-2, @c=-5, @d=33`)
tk.MustQuery(`execute stmt using @d,@a,@b,@c`).Check(testkit.Rows("-5 7 33"))
if pruneMode == string(variable.Dynamic) {
// Once the temporary disabling of the prepared plan cache for dynamic partition prune mode is lifted, change this to 1!
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))
}
}
}
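// newSession creates a session bound to dbName for tests that need a raw
// session instead of a TestKit.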
func newSession(t *testing.T, store kv.Storage, dbName string) sessiontypes.Session {
se, err := session.CreateSession4Test(store)
require.NoError(t, err)
mustExec(t, se, "create database if not exists "+dbName)
mustExec(t, se, "use "+dbName)
return se
}
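// mustExec executes sql on se and fails the test on any error.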
func mustExec(t *testing.T, se sessiontypes.Session, sql string) {
_, err := se.Execute(context.Background(), sql)
require.NoError(t, err)
}
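// TestPlanCacheUnionScan tracks the plan-cache hit counter while a
// transaction adds uncommitted rows, checking that UnionScan-wrapped plans
// are invalidated or reused exactly when expected.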
func TestPlanCacheUnionScan(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec(`set tidb_enable_non_prepared_plan_cache=0`) // insert-stmt can hit the cache and affect the hit counter in this UT
pb := &dto.Metric{}
metrics.ResettablePlanCacheCounterFortTest = true
metrics.PlanCacheCounter.Reset()
counter := metrics.PlanCacheCounter.WithLabelValues("prepare")
tk.MustExec("use test")
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec("create table t1(a int not null)")
tk.MustExec("create table t2(a int not null)")
tk.MustExec("prepare stmt1 from 'select * from t1 where a > ?'")
tk.MustExec("set @p0 = 0")
tk.MustQuery("execute stmt1 using @p0").Check(testkit.Rows())
tk.MustExec("begin")
tk.MustQuery("execute stmt1 using @p0").Check(testkit.Rows())
err := counter.Write(pb)
require.NoError(t, err)
cnt := pb.GetCounter().GetValue()
require.Equal(t, float64(1), cnt)
tk.MustExec("insert into t1 values(1)")
// Cached plan is invalid now, so it is not chosen and is removed.
tk.MustQuery("execute stmt1 using @p0").Check(testkit.Rows(
"1",
))
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(1), cnt)
tk.MustExec("insert into t2 values(1)")
// Cached plan is chosen; modification on t2 does not impact the plan of t1.
tk.MustQuery("execute stmt1 using @p0").Check(testkit.Rows(
"1",
))
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(2), cnt)
tk.MustExec("rollback")
// Though the cached plan contains UnionScan, it does not impact correctness, so it is reused.
tk.MustQuery("execute stmt1 using @p0").Check(testkit.Rows())
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(3), cnt)
tk.MustExec("prepare stmt2 from 'select * from t1 left join t2 on true where t1.a > ?'")
tk.MustQuery("execute stmt2 using @p0").Check(testkit.Rows())
tk.MustExec("begin")
tk.MustQuery("execute stmt2 using @p0").Check(testkit.Rows())
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(4), cnt)
tk.MustExec("insert into t1 values(1)")
// Cached plan is invalid now, so it is not chosen and is removed.
tk.MustQuery("execute stmt2 using @p0").Check(testkit.Rows(
"1 <nil>",
))
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(4), cnt)
tk.MustExec("insert into t2 values(1)")
// Cached plan is invalid now, so it is not chosen and is removed.
tk.MustQuery("execute stmt2 using @p0").Check(testkit.Rows(
"1 1",
))
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(4), cnt)
// Cached plan is reused.
tk.MustQuery("execute stmt2 using @p0").Check(testkit.Rows(
"1 1",
))
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(5), cnt)
tk.MustExec("rollback")
// Though the cached plan contains UnionScan, it does not impact correctness, so it is reused.
tk.MustQuery("execute stmt2 using @p0").Check(testkit.Rows())
err = counter.Write(pb)
require.NoError(t, err)
cnt = pb.GetCounter().GetValue()
require.Equal(t, float64(6), cnt)
}
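// TestPlanCacheSwitchDB verifies that a prepared statement keeps reading the
// table it was resolved against even after USE switches to another database,
// until the statement is prepared again or the query names the DB explicitly.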
func TestPlanCacheSwitchDB(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
// create a table in test
tk.MustExec(`use test`)
tk.MustExec(`drop table if exists t`)
tk.MustExec(`create table t(a int)`)
tk.MustExec(`insert into t values (-1)`)
tk.MustExec(`prepare stmt from 'select * from t'`)
// DB is not specified
se2, err := session.CreateSession4TestWithOpt(store, &session.Opt{
PreparedPlanCache: core.NewLRUPlanCache(100, 0.1, math.MaxUint64, tk.Session(), false),
})
require.NoError(t, err)
tk2 := testkit.NewTestKitWithSession(t, store, se2)
require.Equal(t, tk2.ExecToErr(`prepare stmt from 'select * from t'`).Error(), "[planner:1046]No database selected")
require.Equal(t, tk2.ExecToErr(`prepare stmt from 'select * from test.t'`), nil)
// switch to a new DB
tk.MustExec(`drop database if exists plan_cache`)
tk.MustExec(`create database plan_cache`)
tk.MustExec(`use plan_cache`)
tk.MustExec(`create table t(a int)`)
tk.MustExec(`insert into t values (1)`)
tk.MustQuery(`execute stmt`).Check(testkit.Rows("-1")) // read test.t
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))
tk.MustQuery(`execute stmt`).Check(testkit.Rows("-1")) // read test.t
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))
// prepare again
tk.MustExec(`prepare stmt from 'select * from t'`)
tk.MustQuery(`execute stmt`).Check(testkit.Rows("1")) // read plan_cache.t
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))
tk.MustQuery(`execute stmt`).Check(testkit.Rows("1")) // read plan_cache.t
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))
// specify DB in the query
tk.MustExec(`prepare stmt from 'select * from test.t'`)
tk.MustQuery(`execute stmt`).Check(testkit.Rows("-1")) // read test.t
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0"))
tk.MustQuery(`execute stmt`).Check(testkit.Rows("-1")) // read test.t
tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1"))
}
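// TestInvisibleIndexPrepare checks that toggling an index between invisible
// and visible is reflected in the indexes used by a prepared statement.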
func TestInvisibleIndexPrepare(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(a int, unique idx_a(a))")
tk.MustExec("insert into t values(1)")
tk.MustExec(`prepare stmt1 from "select a from t order by a"`)
tk.MustQuery("execute stmt1").Check(testkit.Rows("1"))
tk.MustQuery("execute stmt1").Check(testkit.Rows("1"))
require.Len(t, tk.Session().GetSessionVars().StmtCtx.IndexNames, 1)
require.Equal(t, "t:idx_a", tk.Session().GetSessionVars().StmtCtx.IndexNames[0])
tk.MustExec("alter table t alter index idx_a invisible")
tk.MustQuery("execute stmt1").Check(testkit.Rows("1"))
tk.MustQuery("execute stmt1").Check(testkit.Rows("1"))
require.Len(t, tk.Session().GetSessionVars().StmtCtx.IndexNames, 0)
tk.MustExec("alter table t alter index idx_a visible")
tk.MustQuery("execute stmt1").Check(testkit.Rows("1"))
tk.MustQuery("execute stmt1").Check(testkit.Rows("1"))
require.Len(t, tk.Session().GetSessionVars().StmtCtx.IndexNames, 1)
require.Equal(t, "t:idx_a", tk.Session().GetSessionVars().StmtCtx.IndexNames[0])
}
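// TestPlanCacheSnapshot ensures a cached plan still honors @@tidb_snapshot:
// a row inserted after the recorded tso must not be visible to the cached
// EXECUTE.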
func TestPlanCacheSnapshot(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t(id int)")
tk.MustExec("insert into t values (1),(2),(3),(4)")
// For mocktikv, the safe point is not initialized, so we manually insert it for the snapshot to use.
timeSafe := time.Now().Add(-48 * 60 * 60 * time.Second).Format("20060102-15:04:05 -0700 MST")
safePointSQL := `INSERT HIGH_PRIORITY INTO mysql.tidb VALUES ('tikv_gc_safe_point', '%[1]s', '')
ON DUPLICATE KEY
UPDATE variable_value = '%[1]s'`
tk.MustExec(fmt.Sprintf(safePointSQL, timeSafe))
tk.MustExec("prepare stmt from 'select * from t where id=?'")
tk.MustExec("set @p = 1")
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
tk.MustQuery("execute stmt using @p").Check(testkit.Rows("1"))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
tk.MustQuery("execute stmt using @p").Check(testkit.Rows("1"))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
// Record the current tso.
tk.MustExec("begin")
tso := tk.Session().GetSessionVars().TxnCtx.StartTS
tk.MustExec("rollback")
require.True(t, tso > 0)
// Insert one more row with id = 1.
tk.MustExec("insert into t values (1)")
tk.MustExec(fmt.Sprintf("set @@tidb_snapshot = '%d'", tso))
tk.MustQuery("select * from t where id = 1").Check(testkit.Rows("1"))
tk.MustQuery("execute stmt using @p").Check(testkit.Rows("1"))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
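// TestPartitionTable compares prepared-statement results between a
// partitioned table t1 and an identical non-partitioned table t2 for several
// partitioning schemes and column types under dynamic prune mode.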
func TestPartitionTable(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
// enable partition table dynamic mode
tk.MustExec("create database test_plan_cache")
tk.MustExec("use test_plan_cache")
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
tk.MustExec("set @@tidb_enable_list_partition = 1")
type testcase struct {
t1Create string
t2Create string
rowGener func() string
varGener func() string
query string
}
randDateTime := func() string {
return fmt.Sprintf("%v-%v-%v %v:%v:%v",
1950+rand.Intn(100), 1+rand.Intn(12), 1+rand.Intn(28), // date
rand.Intn(24), rand.Intn(60), rand.Intn(60)) // time
}
randDate := func() string {
return fmt.Sprintf("%v-%v-%v", 1950+rand.Intn(100), 1+rand.Intn(12), 1+rand.Intn(28))
}
testcases := []testcase{
{ // hash partition + int
"create table t1(a int, b int) partition by hash(a) partitions 20",
"create table t2(a int, b int)",
func() string { return fmt.Sprintf("(%v, %v)", rand.Intn(100000000), rand.Intn(100000000)) },
func() string { return fmt.Sprintf("%v", rand.Intn(100000000)) },
`select * from %v where a > ?`,
},
{ // range partition + int
`create table t1(a int, b int) partition by range(a) (
partition p0 values less than (20000000),
partition p1 values less than (40000000),
partition p2 values less than (60000000),
partition p3 values less than (80000000),
partition p4 values less than (100000000))`,
`create table t2(a int, b int)`,
func() string { return fmt.Sprintf("(%v, %v)", rand.Intn(100000000), rand.Intn(100000000)) },
func() string { return fmt.Sprintf("%v", rand.Intn(100000000)) },
`select * from %v where a > ?`,
},
{ // range partition + varchar
`create table t1(a varchar(10), b varchar(10)) partition by range columns(a) (
partition p0 values less than ('200'),
partition p1 values less than ('400'),
partition p2 values less than ('600'),
partition p3 values less than ('800'),
partition p4 values less than ('9999'))`,
`create table t2(a varchar(10), b varchar(10))`,
func() string { return fmt.Sprintf(`("%v", "%v")`, rand.Intn(1000), rand.Intn(1000)) },
func() string { return fmt.Sprintf(`"%v"`, rand.Intn(1000)) },
`select * from %v where a > ?`,
},
{ // range partition + datetime
`create table t1(a datetime, b datetime) partition by range columns(a) (
partition p0 values less than ('1970-01-01 00:00:00'),
partition p1 values less than ('1990-01-01 00:00:00'),
partition p2 values less than ('2010-01-01 00:00:00'),
partition p3 values less than ('2030-01-01 00:00:00'),
partition p4 values less than ('2060-01-01 00:00:00'))`,
`create table t2(a datetime, b datetime)`,
func() string { return fmt.Sprintf(`("%v", "%v")`, randDateTime(), randDateTime()) },
func() string { return fmt.Sprintf(`"%v"`, randDateTime()) },
`select * from %v where a > ?`,
},
{ // range partition + date
`create table t1(a date, b date) partition by range columns(a) (
partition p0 values less than ('1970-01-01'),
partition p1 values less than ('1990-01-01'),
partition p2 values less than ('2010-01-01'),
partition p3 values less than ('2030-01-01'),
partition p4 values less than ('2060-01-01'))`,
`create table t2(a date, b date)`,
func() string { return fmt.Sprintf(`("%v", "%v")`, randDate(), randDate()) },
func() string { return fmt.Sprintf(`"%v"`, randDate()) },
`select * from %v where a > ?`,
},
{ // list partition + int
`create table t1(a int, b int) partition by list(a) (
partition p0 values in (0, 1, 2, 3, 4),
partition p1 values in (5, 6, 7, 8, 9),
partition p2 values in (10, 11, 12, 13, 14),
partition p3 values in (15, 16, 17, 18, 19))`,
`create table t2(a int, b int)`,
func() string { return fmt.Sprintf("(%v, %v)", rand.Intn(20), rand.Intn(20)) },
func() string { return fmt.Sprintf("%v", rand.Intn(20)) },
`select * from %v where a > ?`,
},
}
for _, tc := range testcases {
// create tables and insert some records
tk.MustExec("drop table if exists t1")
tk.MustExec("drop table if exists t2")
tk.MustExec(tc.t1Create)
tk.MustExec(tc.t2Create)
vals := make([]string, 0, 2048)
for i := 0; i < 2048; i++ {
vals = append(vals, tc.rowGener())
}
tk.MustExec(fmt.Sprintf("insert into t1 values %s", strings.Join(vals, ",")))
tk.MustExec(fmt.Sprintf("insert into t2 values %s", strings.Join(vals, ",")))
// For the first query, @last_plan_from_cache should be zero.
tk.MustExec(fmt.Sprintf(`prepare stmt1 from "%s"`, fmt.Sprintf(tc.query, "t1")))
tk.MustExec(fmt.Sprintf(`prepare stmt2 from "%s"`, fmt.Sprintf(tc.query, "t2")))
tk.MustExec(fmt.Sprintf("set @a=%v", tc.varGener()))
result1 := tk.MustQuery("execute stmt1 using @a").Sort().Rows()
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
tk.MustQuery("execute stmt2 using @a").Sort().Check(result1)
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
for i := 0; i < 100; i++ {
tk.MustExec(fmt.Sprintf("set @a=%v", tc.varGener()))
result1 := tk.MustQuery("execute stmt1 using @a").Sort().Rows()
// When https://github.com/pingcap/tidb/pull/33098 is reverted, this should be 1 again
tk.MustQuery("select @@last_plan_from_cache /* i=" + strconv.Itoa(i) + " prepared statement: (t1) " + tc.query + "\n-- create table: " + tc.t1Create + "*/").Check(testkit.Rows("0"))
tk.MustQuery("execute stmt2 using @a").Sort().Check(result1)
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
}
}
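// helperCheckPlanCache asserts @@last_plan_from_cache when a cache miss is
// expected; otherwise it collects unexpected misses into arr so the caller
// can tolerate a small fraction of them.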
func helperCheckPlanCache(t *testing.T, tk *testkit.TestKit, sql, expected string, arr []string) []string {
res := tk.MustQuery(sql)
got := res.Rows()[0][0]
if expected == "0" {
require.Equal(t, expected, got, sql)
} else {
if got != expected {
return append(arr, sql)
}
}
return arr
}
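// TestPartitionWithVariedDataSources cross-checks TableReader, IndexReader,
// IndexLookUp, PointGet, and BatchGet results across range/hash partitioned
// and normal tables, allowing about 1% of executions to miss the plan cache.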
func TestPartitionWithVariedDataSources(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
// enable partition table dynamic mode
tk.MustExec("create database test_plan_cache2")
tk.MustExec("use test_plan_cache2")
tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'")
// prepare tables
tk.MustExec(`create table trangePK (a int primary key, b int) partition by range (a) (
partition p0 values less than (10000),
partition p1 values less than (20000),
partition p2 values less than (30000),
partition p3 values less than (40000))`)
tk.MustExec(`create table thashPK (a int primary key, b int) partition by hash (a) partitions 4`)
tk.MustExec(`create table tnormalPK (a int primary key, b int)`)
tk.MustExec(`create table trangeIdx (a int unique key, b int) partition by range (a) (
partition p0 values less than (10000),
partition p1 values less than (20000),
partition p2 values less than (30000),
partition p3 values less than (40000))`)
tk.MustExec(`create table thashIdx (a int unique key, b int) partition by hash (a) partitions 4`)
tk.MustExec(`create table tnormalIdx (a int unique key, b int)`)
uniqueVals := make(map[int]struct{})
vals := make([]string, 0, 1000)
for len(vals) < 1000 {
a := rand.Intn(40000)
if _, ok := uniqueVals[a]; ok {
continue
}
uniqueVals[a] = struct{}{}
b := rand.Intn(40000)
vals = append(vals, fmt.Sprintf("(%v, %v)", a, b))
}
for _, tbl := range []string{"trangePK", "thashPK", "tnormalPK", "trangeIdx", "thashIdx", "tnormalIdx"} {
tk.MustExec(fmt.Sprintf(`insert into %v values %v`, tbl, strings.Join(vals, ", ")))
tk.MustExec(`analyze table ` + tbl)
}
// TableReader, PointGet on PK, BatchGet on PK
for _, tbl := range []string{`trangePK`, `thashPK`, `tnormalPK`} {
tk.MustExec(fmt.Sprintf(`prepare stmt%v_tablescan from 'select * from %v use index(primary) where a > ? and a < ?'`, tbl, tbl))
tk.MustExec(fmt.Sprintf(`prepare stmt%v_pointget from 'select * from %v use index(primary) where a = ?'`, tbl, tbl))
tk.MustExec(fmt.Sprintf(`prepare stmt%v_batchget from 'select * from %v use index(primary) where a in (?, ?, ?)'`, tbl, tbl))
}
for i := 0; i < 100; i++ {
mina, maxa := rand.Intn(40000), rand.Intn(40000)
if mina > maxa {
mina, maxa = maxa, mina
}
tk.MustExec(fmt.Sprintf(`set @mina=%v, @maxa=%v`, mina, maxa))
tk.MustExec(fmt.Sprintf(`set @pointa=%v`, rand.Intn(40000)))
tk.MustExec(fmt.Sprintf(`set @a0=%v, @a1=%v, @a2=%v`, rand.Intn(40000), rand.Intn(40000), rand.Intn(40000)))
var rscan, rpoint, rbatch [][]interface{}
for id, tbl := range []string{`trangePK`, `thashPK`, `tnormalPK`} {
scan := tk.MustQuery(fmt.Sprintf(`execute stmt%v_tablescan using @mina, @maxa`, tbl)).Sort()
if id == 0 {
rscan = scan.Rows()
} else {
scan.Check(rscan)
}
point := tk.MustQuery(fmt.Sprintf(`execute stmt%v_pointget using @pointa`, tbl)).Sort()
if id == 0 {
rpoint = point.Rows()
} else {
point.Check(rpoint)
}
batch := tk.MustQuery(fmt.Sprintf(`execute stmt%v_batchget using @a0, @a1, @a2`, tbl)).Sort()
if id == 0 {
rbatch = batch.Rows()
} else {
batch.Check(rbatch)
}
}
}
// IndexReader, IndexLookUp, PointGet on Idx, BatchGet on Idx
for _, tbl := range []string{"trangeIdx", "thashIdx", "tnormalIdx"} {
tk.MustExec(fmt.Sprintf(`prepare stmt%v_indexscan from 'select a from %v use index(a) where a > ? and a < ?'`, tbl, tbl))
tk.MustExec(fmt.Sprintf(`prepare stmt%v_indexlookup from 'select * from %v use index(a) where a > ? and a < ?'`, tbl, tbl))
tk.MustExec(fmt.Sprintf(`prepare stmt%v_pointget_idx from 'select * from %v use index(a) where a = ?'`, tbl, tbl))
tk.MustExec(fmt.Sprintf(`prepare stmt%v_batchget_idx from 'select * from %v use index(a) where a in (?, ?, ?)'`, tbl, tbl))
}
loops := 100
missedPlanCache := make([]string, 0, 4)
for i := 0; i < loops; i++ {
mina, maxa := rand.Intn(40000), rand.Intn(40000)
if mina > maxa {
mina, maxa = maxa, mina
}
tk.MustExec(fmt.Sprintf(`set @mina=%v, @maxa=%v`, mina, maxa))
tk.MustExec(fmt.Sprintf(`set @pointa=%v`, rand.Intn(40000)))
tk.MustExec(fmt.Sprintf(`set @a0=%v, @a1=%v, @a2=%v`, rand.Intn(40000), rand.Intn(40000), rand.Intn(40000)))
var rscan, rlookup, rpoint, rbatch [][]interface{}
var expectedFromPlanCache string
for id, tbl := range []string{"trangeIdx", "thashIdx", "tnormalIdx"} {
scan := tk.MustQuery(fmt.Sprintf(`execute stmt%v_indexscan using @mina, @maxa`, tbl)).Sort()
if id == 2 {
expectedFromPlanCache = "1"
} else {
expectedFromPlanCache = "0"
}
tblStr := tbl + " i: " + strconv.FormatInt(int64(i), 10) + " */" // the query labels below already include "table: "
if i > 0 {
missedPlanCache = helperCheckPlanCache(t, tk, `select @@last_plan_from_cache /* indexscan table: `+tblStr, expectedFromPlanCache, missedPlanCache)
}
if id == 0 {
rscan = scan.Rows()
} else {
scan.Check(rscan)
}
lookup := tk.MustQuery(fmt.Sprintf(`execute stmt%v_indexlookup using @mina, @maxa`, tbl)).Sort()
if i > 0 {
missedPlanCache = helperCheckPlanCache(t, tk, `select @@last_plan_from_cache /* indexlookup table: `+tblStr, expectedFromPlanCache, missedPlanCache)
}
if id == 0 {
rlookup = lookup.Rows()
} else {
lookup.Check(rlookup)
}
point := tk.MustQuery(fmt.Sprintf(`execute stmt%v_pointget_idx using @pointa`, tbl)).Sort()
if tbl == `tnormalPK` && i > 0 {
// PlanCache cannot support PointGet now since we haven't relocated the partition after rebuilding the range.
// Please see Execute.rebuildRange for more details.
missedPlanCache = helperCheckPlanCache(t, tk, `select @@last_plan_from_cache /* pointget table: `+tblStr, expectedFromPlanCache, missedPlanCache)
}
if id == 0 {
rpoint = point.Rows()
} else {
point.Check(rpoint)
}
batch := tk.MustQuery(fmt.Sprintf(`execute stmt%v_batchget_idx using @a0, @a1, @a2`, tbl)).Sort()
if i > 0 {
missedPlanCache = helperCheckPlanCache(t, tk, `select @@last_plan_from_cache /* batchget table: `+tblStr, expectedFromPlanCache, missedPlanCache)
}
if id == 0 {
rbatch = batch.Rows()
} else {
batch.Check(rbatch)
}
}
}
// Allow ~1% non-cached queries, due to background changes etc.
// (Actually just 1/3 %, since there are 3 tables * 4 queries per loop :)
if len(missedPlanCache) > (loops * 4 / 100) {
require.Equal(t, []string{}, missedPlanCache)
}
}
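// TestCachedTable verifies that prepared plans over a cached table
// (alter table ... cache) read from the table cache, and checks which of
// them can also be served from the plan cache.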
func TestCachedTable(t *testing.T) {
store := testkit.CreateMockStore(t)
tk := testkit.NewTestKit(t, store)
tk.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk.MustExec("use test")
tk.MustExec("drop table if exists t")
tk.MustExec("create table t (a int, b int, index i_b(b))")
tk.MustExec("insert into t values (1, 1), (2, 2)")
tk.MustExec("alter table t cache")
tk.MustExec("prepare tableScan from 'select * from t where a>=?'")
tk.MustExec("prepare indexScan from 'select b from t use index(i_b) where b>?'")
tk.MustExec("prepare indexLookup from 'select a from t use index(i_b) where b>? and b<?'")
tk.MustExec("prepare pointGet from 'select b from t use index(i_b) where b=?'")
tk.MustExec("set @a=1, @b=3")
lastReadFromCache := func(tk *testkit.TestKit) bool {
return tk.Session().GetSessionVars().StmtCtx.ReadFromTableCache
}
var cacheLoaded bool
for i := 0; i < 50; i++ {
tk.MustQuery("select * from t").Check(testkit.Rows("1 1", "2 2"))
if lastReadFromCache(tk) {
cacheLoaded = true
break
}
}
require.True(t, cacheLoaded)
// Cache the plan.
tk.MustQuery("execute tableScan using @a").Check(testkit.Rows("1 1", "2 2"))
tk.MustQuery("execute indexScan using @a").Check(testkit.Rows("2"))
tk.MustQuery("execute indexLookup using @a, @b").Check(testkit.Rows("2"))
tk.MustQuery("execute pointGet using @a").Check(testkit.Rows("1"))
// Table Scan
tk.MustQuery("execute tableScan using @a").Check(testkit.Rows("1 1", "2 2"))
require.True(t, lastReadFromCache(tk))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
// Index Scan
tk.MustQuery("execute indexScan using @a").Check(testkit.Rows("2"))
require.True(t, lastReadFromCache(tk))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
// IndexLookup
tk.MustQuery("execute indexLookup using @a, @b").Check(testkit.Rows("2"))
require.True(t, lastReadFromCache(tk))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) // b>1 and b<3 --> b=2
// PointGet
tk.MustQuery("execute pointGet using @a").Check(testkit.Rows("1"))
require.True(t, lastReadFromCache(tk))
tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
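// TestPlanCacheWithRCWhenInfoSchemaChange checks that under READ-COMMITTED,
// dropping an index in another session prevents a stale cached plan from
// being used, in both the text and binary protocols.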
func TestPlanCacheWithRCWhenInfoSchemaChange(t *testing.T) {
ctx := context.Background()
store := testkit.CreateMockStore(t)
tk1 := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk1.MustExec("use test")
tk1.MustExec("set global tidb_enable_metadata_lock=0")
tk2.MustExec("use test")
tk1.MustExec("drop table if exists t1")
tk1.MustExec("create table t1(id int primary key, c int, index ic (c))")
// prepare text protocol
tk1.MustExec("prepare s from 'select /*+use_index(t1, ic)*/ * from t1 where 1'")
// prepare binary protocol
stmtID, _, _, err := tk2.Session().PrepareStmt("select /*+use_index(t1, ic)*/ * from t1 where 1")
require.Nil(t, err)
tk1.MustExec("set tx_isolation='READ-COMMITTED'")
tk1.MustExec("begin pessimistic")
tk2.MustExec("set tx_isolation='READ-COMMITTED'")
tk2.MustExec("begin pessimistic")
tk1.MustQuery("execute s").Check(testkit.Rows())
rs, err := tk2.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk2.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows())
tk3 := testkit.NewTestKit(t, store)
tk3.MustExec("use test")
tk3.MustExec("alter table t1 drop index ic")
tk3.MustExec("insert into t1 values(1, 0)")
// Executions after the schema change should not hit the plan cache.
// execute text protocol
tk1.MustQuery("execute s").Check(testkit.Rows("1 0"))
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
// execute binary protocol
rs, err = tk2.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk2.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 0"))
tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
}
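// TestConsistencyBetweenPrepareExecuteAndNormalSql ensures EXECUTE (text and
// binary protocols) and the equivalent plain SELECT observe the same data in
// an RC transaction even after a concurrent schema change.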
func TestConsistencyBetweenPrepareExecuteAndNormalSql(t *testing.T) {
ctx := context.Background()
store := testkit.CreateMockStore(t)
tk1 := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk1.MustExec("set global tidb_enable_metadata_lock=0")
tk1.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk2.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk1.MustExec("use test")
tk2.MustExec("use test")
tk1.MustExec("drop table if exists t1")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 1), (2, 2)")
// prepare text protocol
tk1.MustExec("prepare s from 'select * from t1'")
// prepare binary protocol
stmtID, _, _, err := tk1.Session().PrepareStmt("select * from t1")
require.Nil(t, err)
tk1.MustExec("set tx_isolation='READ-COMMITTED'")
tk1.MustExec("begin pessimistic")
tk2.MustExec("set tx_isolation='READ-COMMITTED'")
tk2.MustExec("begin pessimistic")
// Execute using sql
tk1.MustQuery("execute s").Check(testkit.Rows("1 1", "2 2"))
// Execute using binary
rs, err := tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 1", "2 2"))
// Normal sql
tk1.MustQuery("select * from t1").Check(testkit.Rows("1 1", "2 2"))
// Change infoSchema
tk2.MustExec("alter table t1 drop column c")
tk2.MustExec("insert into t1 values (3)")
// Execute using sql
tk1.MustQuery("execute s").Check(testkit.Rows("1 1", "2 2", "3 <nil>"))
// Execute using binary
rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 1", "2 2", "3 <nil>"))
// Normal sql
tk1.MustQuery("select * from t1").Check(testkit.Rows("1 1", "2 2", "3 <nil>"))
tk1.MustExec("commit")
// After beginning a new txn, the infoSchema should be the latest
tk1.MustExec("begin pessimistic")
tk1.MustQuery("select * from t1").Check(testkit.Rows("1", "2", "3"))
}
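// verifyCache executes the prepared statement through both protocols and
// asserts the expected cache hit/miss pattern before and after tk2 changes
// the schema.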
func verifyCache(ctx context.Context, t *testing.T, tk1 *testkit.TestKit, tk2 *testkit.TestKit, stmtID uint32) {
// Cache miss the first time.
tk1.MustExec("execute s")
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
// This time, the cache will be hit.
rs, err := tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.NoError(t, err)
require.NoError(t, rs.Close())
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
tk1.MustExec("execute s")
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
// Change the infoSchema version, which will invalidate the cached plan on the next execute
tk2.MustExec("alter table t1 drop column c")
tk1.MustExec("execute s")
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
// Now the plan cache will be valid
rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.NoError(t, err)
require.NoError(t, rs.Close())
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
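// TestCacheHitInRc verifies the cache hit/miss pattern of verifyCache under
// the READ-COMMITTED isolation level.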
func TestCacheHitInRc(t *testing.T) {
ctx := context.Background()
store := testkit.CreateMockStore(t)
tk1 := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk1.MustExec("set global tidb_enable_metadata_lock=0")
tk1.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk2.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk1.MustExec("use test")
tk2.MustExec("use test")
tk1.MustExec("drop table if exists t1")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 1), (2, 2)")
// prepare text protocol
tk1.MustExec("prepare s from 'select * from t1'")
// prepare binary protocol
stmtID, _, _, err := tk1.Session().PrepareStmt("select * from t1")
require.Nil(t, err)
// Test for RC
tk1.MustExec("set tx_isolation='READ-COMMITTED'")
tk1.MustExec("begin pessimistic")
// Verify for the RC isolation
verifyCache(ctx, t, tk1, tk2, stmtID)
tk1.MustExec("rollback")
}
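// TestCacheHitInForUpdateRead runs the same verification for a
// select ... for update statement inside a pessimistic transaction.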
func TestCacheHitInForUpdateRead(t *testing.T) {
ctx := context.Background()
store := testkit.CreateMockStore(t)
tk1 := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk1.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk2.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk1.MustExec("use test")
tk2.MustExec("use test")
tk1.MustExec("drop table if exists t1")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 1), (2, 2)")
tk1.MustExec("prepare s from 'select * from t1 where id = 1 for update'")
stmtID, _, _, err := tk1.Session().PrepareStmt("select * from t1 where id = 1 for update")
require.Nil(t, err)
tk1.MustExec("begin pessimistic")
// Verify for the for update read
verifyCache(ctx, t, tk1, tk2, stmtID)
tk1.MustExec("rollback")
}
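// TestPointGetForUpdateAutoCommitCache checks that an auto-commit point-get
// for update plan is cached, invalidated once by a schema change, and then
// cached again.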
func TestPointGetForUpdateAutoCommitCache(t *testing.T) {
ctx := context.Background()
store := testkit.CreateMockStore(t)
tk1 := testkit.NewTestKit(t, store)
tk2 := testkit.NewTestKit(t, store)
tk1.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk2.MustExec(`set tidb_enable_prepared_plan_cache=1`)
tk1.MustExec("use test")
tk2.MustExec("use test")
tk1.MustExec("drop table if exists t1")
tk1.MustExec("create table t1(id int primary key, c int)")
tk1.MustExec("insert into t1 values(1, 1), (2, 2)")
tk1.MustExec("prepare s from 'select * from t1 where id = 1 for update'")
stmtID, _, _, err := tk1.Session().PrepareStmt("select * from t1 where id = 1 for update")
require.Nil(t, err)
rs, err := tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 1"))
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 1"))
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
tk2.MustExec("alter table t1 drop column c")
tk2.MustExec("update t1 set id = 10 where id = 1")
rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows())
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0"))
rs, err = tk1.Session().ExecutePreparedStmt(ctx, stmtID, expression.Args2Expressions4Test())
require.Nil(t, err)
tk1.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows())
tk1.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1"))
}
| pkg/planner/core/tests/prepare/prepare_test.go | 1 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.001911916653625667,
0.00024206010857596993,
0.0001610149338375777,
0.00017283865599893034,
0.0002006090508075431
] |
{
"id": 1,
"code_window": [
"\tast.SetVar: {},\n",
"\tast.GetVar: {},\n",
"\tast.ReleaseAllLocks: {},\n",
"}\n",
"\n",
"// DeferredFunctions stores functions which are foldable but should be deferred as well when plan cache is enabled.\n",
"// Note that, these functions must be foldable at first place, i.e, they are not in `unFoldableFunctions`.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"// IsDeferredFunctions checks whether the function is in DeferredFunctions.\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 135
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rowcodec_test
import (
"encoding/binary"
"hash/crc32"
"math"
"sort"
"strings"
"testing"
"time"
"github.com/pingcap/tidb/pkg/kv"
"github.com/pingcap/tidb/pkg/parser/model"
"github.com/pingcap/tidb/pkg/parser/mysql"
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
"github.com/pingcap/tidb/pkg/tablecodec"
"github.com/pingcap/tidb/pkg/types"
"github.com/pingcap/tidb/pkg/util/chunk"
"github.com/pingcap/tidb/pkg/util/codec"
"github.com/pingcap/tidb/pkg/util/collate"
"github.com/pingcap/tidb/pkg/util/rowcodec"
"github.com/stretchr/testify/require"
)
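// testData describes one column for the round-trip tests: its column ID and
// field type, the datum to encode, the datum expected after decoding, an
// optional default value, and whether the column is the integer handle.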
type testData struct {
id int64
ft *types.FieldType
input types.Datum
output types.Datum
def *types.Datum
handle bool
}
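// TestEncodeLargeSmallReuseBug reuses one rowcodec.Encoder for a large column
// ID and then a small one, guarding against state leaking between encodings.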
func TestEncodeLargeSmallReuseBug(t *testing.T) {
// reuse one rowcodec.Encoder.
var encoder rowcodec.Encoder
colFt := types.NewFieldType(mysql.TypeString)
largeColID := int64(300)
b, err := encoder.Encode(nil, []int64{largeColID}, []types.Datum{types.NewBytesDatum([]byte(""))}, nil)
require.NoError(t, err)
bDecoder := rowcodec.NewDatumMapDecoder([]rowcodec.ColInfo{
{
ID: largeColID,
Ft: colFt,
IsPKHandle: false,
},
}, nil)
_, err = bDecoder.DecodeToDatumMap(b, nil)
require.NoError(t, err)
colFt = types.NewFieldType(mysql.TypeLonglong)
smallColID := int64(1)
b, err = encoder.Encode(nil, []int64{smallColID}, []types.Datum{types.NewIntDatum(2)}, nil)
require.NoError(t, err)
bDecoder = rowcodec.NewDatumMapDecoder([]rowcodec.ColInfo{
{
ID: smallColID,
Ft: colFt,
IsPKHandle: false,
},
}, nil)
m, err := bDecoder.DecodeToDatumMap(b, nil)
require.NoError(t, err)
v := m[smallColID]
require.Equal(t, int64(2), v.GetInt64())
}
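// TestDecodeRowWithHandle round-trips rows whose primary key is the signed or
// unsigned integer handle, decoding into a datum map, a chunk, and old row
// bytes.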
func TestDecodeRowWithHandle(t *testing.T) {
handleID := int64(-1)
handleValue := int64(10000)
tests := []struct {
name string
testData []testData
}{
{
"signed int",
[]testData{
{
handleID,
types.NewFieldType(mysql.TypeLonglong),
types.NewIntDatum(handleValue),
types.NewIntDatum(handleValue),
nil,
true,
},
{
10,
types.NewFieldType(mysql.TypeLonglong),
types.NewIntDatum(1),
types.NewIntDatum(1),
nil,
false,
},
},
},
{
"unsigned int",
[]testData{
{
handleID,
withUnsigned(types.NewFieldType(mysql.TypeLonglong)),
types.NewUintDatum(uint64(handleValue)),
types.NewUintDatum(uint64(handleValue)), // decoded as uint if the type is unsigned.
nil,
true,
},
{
10,
types.NewFieldType(mysql.TypeLonglong),
types.NewIntDatum(1),
types.NewIntDatum(1),
nil,
false,
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
td := test.testData
// transform test data into input.
colIDs := make([]int64, 0, len(td))
dts := make([]types.Datum, 0, len(td))
fts := make([]*types.FieldType, 0, len(td))
cols := make([]rowcodec.ColInfo, 0, len(td))
handleColFtMap := make(map[int64]*types.FieldType)
for _, d := range td {
if d.handle {
handleColFtMap[handleID] = d.ft
} else {
colIDs = append(colIDs, d.id)
dts = append(dts, d.input)
}
fts = append(fts, d.ft)
cols = append(cols, rowcodec.ColInfo{
ID: d.id,
IsPKHandle: d.handle,
Ft: d.ft,
})
}
// test encode input.
var encoder rowcodec.Encoder
newRow, err := encoder.Encode(time.UTC, colIDs, dts, nil)
require.NoError(t, err)
// decode to datum map.
mDecoder := rowcodec.NewDatumMapDecoder(cols, time.UTC)
dm, err := mDecoder.DecodeToDatumMap(newRow, nil)
require.NoError(t, err)
dm, err = tablecodec.DecodeHandleToDatumMap(kv.IntHandle(handleValue), []int64{handleID}, handleColFtMap, time.UTC, dm)
require.NoError(t, err)
for _, d := range td {
dat, exists := dm[d.id]
require.True(t, exists)
require.Equal(t, d.input, dat)
}
// decode to chunk.
cDecoder := rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.UTC)
chk := chunk.New(fts, 1, 1)
err = cDecoder.DecodeToChunk(newRow, kv.IntHandle(handleValue), chk)
require.NoError(t, err)
chkRow := chk.GetRow(0)
cdt := chkRow.GetDatumRow(fts)
for i, d := range td {
dat := cdt[i]
if dat.Kind() == types.KindMysqlDecimal {
require.Equal(t, d.output.GetMysqlDecimal(), dat.GetMysqlDecimal())
} else {
require.Equal(t, d.output, dat)
}
}
// decode to old row bytes.
colOffset := make(map[int64]int)
for i, t := range td {
colOffset[t.id] = i
}
bDecoder := rowcodec.NewByteDecoder(cols, []int64{-1}, nil, nil)
oldRow, err := bDecoder.DecodeToBytes(colOffset, kv.IntHandle(handleValue), newRow, nil)
require.NoError(t, err)
for i, d := range td {
remain, dat, err := codec.DecodeOne(oldRow[i])
require.NoError(t, err)
require.Len(t, remain, 0)
if dat.Kind() == types.KindMysqlDecimal {
require.Equal(t, d.output.GetMysqlDecimal(), dat.GetMysqlDecimal())
} else {
require.Equal(t, d.output, dat)
}
}
})
}
}
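// TestEncodeKindNullDatum ensures an explicit KindNull datum encodes and
// decodes back as NULL alongside a normal value.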
func TestEncodeKindNullDatum(t *testing.T) {
var encoder rowcodec.Encoder
colIDs := []int64{1, 2}
var nilDt types.Datum
nilDt.SetNull()
dts := []types.Datum{nilDt, types.NewIntDatum(2)}
ft := types.NewFieldType(mysql.TypeLonglong)
fts := []*types.FieldType{ft, ft}
newRow, err := encoder.Encode(time.UTC, colIDs, dts, nil)
require.NoError(t, err)
cols := []rowcodec.ColInfo{{ID: 1, Ft: ft}, {ID: 2, Ft: ft}}
cDecoder := rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.UTC)
chk := chunk.New(fts, 1, 1)
err = cDecoder.DecodeToChunk(newRow, kv.IntHandle(-1), chk)
require.NoError(t, err)
chkRow := chk.GetRow(0)
cdt := chkRow.GetDatumRow(fts)
require.True(t, cdt[0].IsNull())
require.Equal(t, int64(2), cdt[1].GetInt64())
}
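// TestDecodeDecimalFspNotMatch encodes a decimal with fsp 4 and decodes it
// with a target type of fsp 3, expecting the value to be truncated to 11.990.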
func TestDecodeDecimalFspNotMatch(t *testing.T) {
var encoder rowcodec.Encoder
colIDs := []int64{
1,
}
dec := withFrac(4)(withLen(6)(types.NewDecimalDatum(types.NewDecFromStringForTest("11.9900"))))
dts := []types.Datum{dec}
ft := types.NewFieldType(mysql.TypeNewDecimal)
ft.SetDecimal(4)
fts := []*types.FieldType{ft}
newRow, err := encoder.Encode(time.UTC, colIDs, dts, nil)
require.NoError(t, err)
// decode to chunk.
ft = types.NewFieldType(mysql.TypeNewDecimal)
ft.SetDecimal(3)
cols := make([]rowcodec.ColInfo, 0)
cols = append(cols, rowcodec.ColInfo{
ID: 1,
Ft: ft,
})
cDecoder := rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.UTC)
chk := chunk.New(fts, 1, 1)
err = cDecoder.DecodeToChunk(newRow, kv.IntHandle(-1), chk)
require.NoError(t, err)
chkRow := chk.GetRow(0)
cdt := chkRow.GetDatumRow(fts)
dec = withFrac(3)(withLen(6)(types.NewDecimalDatum(types.NewDecFromStringForTest("11.990"))))
require.Equal(t, dec.GetMysqlDecimal().String(), cdt[0].GetMysqlDecimal().String())
}
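// TestTypesNewRowCodec round-trips a wide range of field types (numeric,
// string, time, duration, decimal, enum, set, bit, JSON) through the row
// codec, including a large column ID and a 64 KiB string value.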
func TestTypesNewRowCodec(t *testing.T) {
getJSONDatum := func(value string) types.Datum {
j, err := types.ParseBinaryJSONFromString(value)
require.NoError(t, err)
var d types.Datum
d.SetMysqlJSON(j)
return d
}
getSetDatum := func(name string, value uint64) types.Datum {
var d types.Datum
d.SetMysqlSet(types.Set{Name: name, Value: value}, mysql.DefaultCollationName)
return d
}
getTime := func(value string) types.Time {
d, err := types.ParseTime(types.DefaultStmtNoWarningContext, value, mysql.TypeTimestamp, 6)
require.NoError(t, err)
return d
}
blobTp := types.NewFieldType(mysql.TypeBlob)
blobTp.SetCollate(mysql.DefaultCollationName)
blobTp.SetFlen(types.UnspecifiedLength)
strTp := types.NewFieldType(mysql.TypeString)
strTp.SetCollate(mysql.DefaultCollationName)
enumTp := types.NewFieldType(mysql.TypeEnum)
enumTp.SetCollate(mysql.DefaultCollationName)
enumTp.SetFlen(collate.DefaultLen)
setTp := types.NewFieldType(mysql.TypeSet)
setTp.SetCollate(mysql.DefaultCollationName)
setTp.SetFlen(collate.DefaultLen)
varStrTp := types.NewFieldType(mysql.TypeVarString)
varStrTp.SetCollate(mysql.DefaultCollationName)
smallTestDataList := []testData{
{
1,
types.NewFieldType(mysql.TypeLonglong),
types.NewIntDatum(1),
types.NewIntDatum(1),
nil,
false,
},
{
22,
withUnsigned(types.NewFieldType(mysql.TypeShort)),
types.NewUintDatum(1),
types.NewUintDatum(1),
nil,
false,
},
{
3,
types.NewFieldType(mysql.TypeDouble),
types.NewFloat64Datum(2),
types.NewFloat64Datum(2),
nil,
false,
},
{
24,
blobTp,
types.NewStringDatum("abc"),
types.NewStringDatum("abc"),
nil,
false,
},
{
25,
strTp,
types.NewStringDatum("ab"),
types.NewBytesDatum([]byte("ab")),
nil,
false,
},
{
5,
withFsp(6)(types.NewFieldType(mysql.TypeTimestamp)),
types.NewTimeDatum(getTime("2011-11-10 11:11:11.999999")),
types.NewUintDatum(1840446893366133311),
nil,
false,
},
{
16,
withFsp(0)(types.NewFieldType(mysql.TypeDuration)),
types.NewDurationDatum(getDuration("4:00:00")),
types.NewIntDatum(14400000000000),
nil,
false,
},
{
8,
types.NewFieldType(mysql.TypeNewDecimal),
withFrac(4)(withLen(6)(types.NewDecimalDatum(types.NewDecFromStringForTest("11.9900")))),
withFrac(4)(withLen(6)(types.NewDecimalDatum(types.NewDecFromStringForTest("11.9900")))),
nil,
false,
},
{
12,
types.NewFieldType(mysql.TypeYear),
types.NewIntDatum(1999),
types.NewIntDatum(1999),
nil,
false,
},
{
9,
withEnumElems("y", "n")(enumTp),
types.NewMysqlEnumDatum(types.Enum{Name: "n", Value: 2}),
types.NewUintDatum(2),
nil,
false,
},
{
14,
types.NewFieldType(mysql.TypeJSON),
getJSONDatum(`{"a":2}`),
getJSONDatum(`{"a":2}`),
nil,
false,
},
{
11,
types.NewFieldType(mysql.TypeNull),
types.NewDatum(nil),
types.NewDatum(nil),
nil,
false,
},
{
2,
types.NewFieldType(mysql.TypeNull),
types.NewDatum(nil),
types.NewDatum(nil),
nil,
false,
},
{
100,
types.NewFieldType(mysql.TypeNull),
types.NewDatum(nil),
types.NewDatum(nil),
nil,
false,
},
{
116,
types.NewFieldType(mysql.TypeFloat),
types.NewFloat32Datum(6),
types.NewFloat64Datum(6),
nil,
false,
},
{
117,
withEnumElems("n1", "n2")(setTp),
getSetDatum("n1", 1),
types.NewUintDatum(1),
nil,
false,
},
{
118,
withFlen(24)(types.NewFieldType(mysql.TypeBit)), // 3 bit
types.NewMysqlBitDatum(types.NewBinaryLiteralFromUint(3223600, 3)),
types.NewUintDatum(3223600),
nil,
false,
},
{
119,
varStrTp,
types.NewStringDatum(""),
types.NewBytesDatum([]byte("")),
nil,
false,
},
}
largeColIDTestDataList := make([]testData, len(smallTestDataList))
copy(largeColIDTestDataList, smallTestDataList)
largeColIDTestDataList[0].id = 300
largeTestDataList := make([]testData, len(smallTestDataList))
copy(largeTestDataList, smallTestDataList)
largeTestDataList[3].input = types.NewStringDatum(strings.Repeat("a", math.MaxUint16+1))
largeTestDataList[3].output = types.NewStringDatum(strings.Repeat("a", math.MaxUint16+1))
var encoder rowcodec.Encoder
tests := []struct {
name string
testData []testData
}{
{
"small",
smallTestDataList,
},
{
"largeColID",
largeColIDTestDataList,
},
{
"largeData",
largeTestDataList,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
td := test.testData
// transform test data into input.
colIDs := make([]int64, 0, len(td))
dts := make([]types.Datum, 0, len(td))
fts := make([]*types.FieldType, 0, len(td))
cols := make([]rowcodec.ColInfo, 0, len(td))
for _, d := range td {
colIDs = append(colIDs, d.id)
dts = append(dts, d.input)
fts = append(fts, d.ft)
cols = append(cols, rowcodec.ColInfo{
ID: d.id,
IsPKHandle: d.handle,
Ft: d.ft,
})
}
// test encode input.
newRow, err := encoder.Encode(time.UTC, colIDs, dts, nil)
require.NoError(t, err)
// decode to datum map.
mDecoder := rowcodec.NewDatumMapDecoder(cols, time.UTC)
dm, err := mDecoder.DecodeToDatumMap(newRow, nil)
require.NoError(t, err)
for _, d := range td {
dat, exists := dm[d.id]
require.True(t, exists)
require.Equal(t, d.input, dat)
}
// decode to chunk.
cDecoder := rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.UTC)
chk := chunk.New(fts, 1, 1)
err = cDecoder.DecodeToChunk(newRow, kv.IntHandle(-1), chk)
require.NoError(t, err)
chkRow := chk.GetRow(0)
cdt := chkRow.GetDatumRow(fts)
for i, d := range td {
dat := cdt[i]
if dat.Kind() == types.KindMysqlDecimal {
require.Equal(t, d.output.GetMysqlDecimal(), dat.GetMysqlDecimal())
} else {
require.Equal(t, d.input, dat)
}
}
// decode to old row bytes.
colOffset := make(map[int64]int)
for i, t := range td {
colOffset[t.id] = i
}
bDecoder := rowcodec.NewByteDecoder(cols, []int64{-1}, nil, nil)
oldRow, err := bDecoder.DecodeToBytes(colOffset, kv.IntHandle(-1), newRow, nil)
require.NoError(t, err)
for i, d := range td {
remain, dat, err := codec.DecodeOne(oldRow[i])
require.NoError(t, err)
require.Len(t, remain, 0)
if dat.Kind() == types.KindMysqlDecimal {
require.Equal(t, d.output.GetMysqlDecimal(), dat.GetMysqlDecimal())
} else if dat.Kind() == types.KindBytes {
require.Equal(t, d.output.GetBytes(), dat.GetBytes())
} else {
require.Equal(t, d.output, dat)
}
}
})
}
}
func TestNilAndDefault(t *testing.T) {
td := []testData{
{
1,
types.NewFieldType(mysql.TypeLonglong),
types.NewIntDatum(1),
types.NewIntDatum(1),
nil,
false,
},
{
2,
withUnsigned(types.NewFieldType(mysql.TypeLonglong)),
types.NewUintDatum(1),
types.NewUintDatum(9),
getDatumPoint(types.NewUintDatum(9)),
false,
},
}
// transform test data into input.
colIDs := make([]int64, 0, len(td))
dts := make([]types.Datum, 0, len(td))
cols := make([]rowcodec.ColInfo, 0, len(td))
fts := make([]*types.FieldType, 0, len(td))
for i := range td {
d := td[i]
if d.def == nil {
colIDs = append(colIDs, d.id)
dts = append(dts, d.input)
}
fts = append(fts, d.ft)
cols = append(cols, rowcodec.ColInfo{
ID: d.id,
IsPKHandle: d.handle,
Ft: d.ft,
})
}
ddf := func(i int, chk *chunk.Chunk) error {
d := td[i]
if d.def == nil {
chk.AppendNull(i)
return nil
}
chk.AppendDatum(i, d.def)
return nil
}
bdf := func(i int) ([]byte, error) {
d := td[i]
if d.def == nil {
return nil, nil
}
return getOldDatumByte(*d.def), nil
}
// test encode input.
var encoder rowcodec.Encoder
newRow, err := encoder.Encode(time.UTC, colIDs, dts, nil)
require.NoError(t, err)
// decode to datum map.
mDecoder := rowcodec.NewDatumMapDecoder(cols, time.UTC)
dm, err := mDecoder.DecodeToDatumMap(newRow, nil)
require.NoError(t, err)
for _, d := range td {
dat, exists := dm[d.id]
if d.def != nil {
			// the datum map decoder should not fill in the default value.
require.False(t, exists)
} else {
require.True(t, exists)
require.Equal(t, d.output, dat)
}
}
// decode to chunk.
chk := chunk.New(fts, 1, 1)
cDecoder := rowcodec.NewChunkDecoder(cols, []int64{-1}, ddf, time.UTC)
err = cDecoder.DecodeToChunk(newRow, kv.IntHandle(-1), chk)
require.NoError(t, err)
chkRow := chk.GetRow(0)
cdt := chkRow.GetDatumRow(fts)
for i, d := range td {
dat := cdt[i]
if dat.Kind() == types.KindMysqlDecimal {
require.Equal(t, d.output.GetMysqlDecimal(), dat.GetMysqlDecimal())
} else {
require.Equal(t, d.output, dat)
}
}
chk = chunk.New(fts, 1, 1)
cDecoder = rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.UTC)
err = cDecoder.DecodeToChunk(newRow, kv.IntHandle(-1), chk)
require.NoError(t, err)
chkRow = chk.GetRow(0)
cdt = chkRow.GetDatumRow(fts)
for i := range td {
if i == 0 {
continue
}
require.True(t, cdt[i].IsNull())
}
// decode to old row bytes.
colOffset := make(map[int64]int)
for i, t := range td {
colOffset[t.id] = i
}
bDecoder := rowcodec.NewByteDecoder(cols, []int64{-1}, bdf, time.UTC)
oldRow, err := bDecoder.DecodeToBytes(colOffset, kv.IntHandle(-1), newRow, nil)
require.NoError(t, err)
for i, d := range td {
remain, dat, err := codec.DecodeOne(oldRow[i])
require.NoError(t, err)
require.Len(t, remain, 0)
if dat.Kind() == types.KindMysqlDecimal {
require.Equal(t, d.output.GetMysqlDecimal(), dat.GetMysqlDecimal())
} else {
require.Equal(t, d.output, dat)
}
}
}
func TestVarintCompatibility(t *testing.T) {
td := []testData{
{
1,
types.NewFieldType(mysql.TypeLonglong),
types.NewIntDatum(1),
types.NewIntDatum(1),
nil,
false,
},
{
2,
withUnsigned(types.NewFieldType(mysql.TypeLonglong)),
types.NewUintDatum(1),
types.NewUintDatum(1),
nil,
false,
},
}
// transform test data into input.
colIDs := make([]int64, 0, len(td))
dts := make([]types.Datum, 0, len(td))
cols := make([]rowcodec.ColInfo, 0, len(td))
for _, d := range td {
colIDs = append(colIDs, d.id)
dts = append(dts, d.input)
cols = append(cols, rowcodec.ColInfo{
ID: d.id,
IsPKHandle: d.handle,
Ft: d.ft,
})
}
// test encode input.
var encoder rowcodec.Encoder
newRow, err := encoder.Encode(time.UTC, colIDs, dts, nil)
require.NoError(t, err)
decoder := rowcodec.NewByteDecoder(cols, []int64{-1}, nil, time.UTC)
// decode to old row bytes.
colOffset := make(map[int64]int)
for i, t := range td {
colOffset[t.id] = i
}
oldRow, err := decoder.DecodeToBytes(colOffset, kv.IntHandle(1), newRow, nil)
require.NoError(t, err)
for i, d := range td {
oldVarint, err := tablecodec.EncodeValue(nil, nil, d.output) // tablecodec will encode as varint/varuint
require.NoError(t, err)
require.Equal(t, oldRow[i], oldVarint)
}
}
func TestCodecUtil(t *testing.T) {
colIDs := []int64{1, 2, 3, 4}
tps := make([]*types.FieldType, 4)
for i := 0; i < 3; i++ {
tps[i] = types.NewFieldType(mysql.TypeLonglong)
}
tps[3] = types.NewFieldType(mysql.TypeNull)
sc := stmtctx.NewStmtCtx()
oldRow, err := tablecodec.EncodeOldRow(sc.TimeZone(), types.MakeDatums(1, 2, 3, nil), colIDs, nil, nil)
require.NoError(t, err)
var (
rb rowcodec.Encoder
newRow []byte
)
newRow, err = rowcodec.EncodeFromOldRow(&rb, nil, oldRow, nil)
require.NoError(t, err)
require.True(t, rowcodec.IsNewFormat(newRow))
require.False(t, rowcodec.IsNewFormat(oldRow))
// test stringer for decoder.
var cols = make([]rowcodec.ColInfo, 0, len(tps))
for i, ft := range tps {
cols = append(cols, rowcodec.ColInfo{
ID: colIDs[i],
IsPKHandle: false,
Ft: ft,
})
}
d := rowcodec.NewDecoder(cols, []int64{-1}, nil)
// test ColumnIsNull
isNull, err := d.ColumnIsNull(newRow, 4, nil)
require.NoError(t, err)
require.True(t, isNull)
isNull, err = d.ColumnIsNull(newRow, 1, nil)
require.NoError(t, err)
require.False(t, isNull)
isNull, err = d.ColumnIsNull(newRow, 5, nil)
require.NoError(t, err)
require.True(t, isNull)
isNull, err = d.ColumnIsNull(newRow, 5, []byte{1})
require.NoError(t, err)
require.False(t, isNull)
// test isRowKey
require.False(t, rowcodec.IsRowKey([]byte{'b', 't'}))
require.False(t, rowcodec.IsRowKey([]byte{'t', 'r'}))
}
func TestOldRowCodec(t *testing.T) {
colIDs := []int64{1, 2, 3, 4}
tps := make([]*types.FieldType, 4)
for i := 0; i < 3; i++ {
tps[i] = types.NewFieldType(mysql.TypeLonglong)
}
tps[3] = types.NewFieldType(mysql.TypeNull)
sc := stmtctx.NewStmtCtx()
oldRow, err := tablecodec.EncodeOldRow(sc.TimeZone(), types.MakeDatums(1, 2, 3, nil), colIDs, nil, nil)
require.NoError(t, err)
var (
rb rowcodec.Encoder
newRow []byte
)
newRow, err = rowcodec.EncodeFromOldRow(&rb, nil, oldRow, nil)
require.NoError(t, err)
cols := make([]rowcodec.ColInfo, len(tps))
for i, tp := range tps {
cols[i] = rowcodec.ColInfo{
ID: colIDs[i],
Ft: tp,
}
}
rd := rowcodec.NewChunkDecoder(cols, []int64{-1}, nil, time.Local)
chk := chunk.NewChunkWithCapacity(tps, 1)
err = rd.DecodeToChunk(newRow, kv.IntHandle(-1), chk)
require.NoError(t, err)
row := chk.GetRow(0)
for i := 0; i < 3; i++ {
require.Equal(t, int64(i+1), row.GetInt64(i))
}
}
func Test65535Bug(t *testing.T) {
colIds := []int64{1}
tps := make([]*types.FieldType, 1)
tps[0] = types.NewFieldType(mysql.TypeString)
text65535 := strings.Repeat("a", 65535)
encode := rowcodec.Encoder{}
bd, err := encode.Encode(time.UTC, colIds, []types.Datum{types.NewStringDatum(text65535)}, nil)
require.NoError(t, err)
cols := make([]rowcodec.ColInfo, 1)
cols[0] = rowcodec.ColInfo{
ID: 1,
Ft: tps[0],
}
dc := rowcodec.NewDatumMapDecoder(cols, nil)
result, err := dc.DecodeToDatumMap(bd, nil)
require.NoError(t, err)
rs := result[1]
require.Equal(t, text65535, rs.GetString())
}
func TestColumnEncode(t *testing.T) {
encodeUint64 := func(v uint64) []byte {
return binary.LittleEndian.AppendUint64(nil, v)
}
encodeBytes := func(v []byte) []byte {
return append(binary.LittleEndian.AppendUint32(nil, uint32(len(v))), v...)
}
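	// For reference (an illustrative note, not part of the original test):
	// with the helpers above, an integer column's expected raw form is its
	// value as 8 little-endian bytes, and a bytes column is a 4-byte
	// little-endian length prefix followed by the data, e.g.:
	//
	//	encodeUint64(1)           // -> 01 00 00 00 00 00 00 00
	//	encodeBytes([]byte("ab")) // -> 02 00 00 00 61 62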
var (
buf = make([]byte, 0, 128)
intZero = 0
intPos = 42
intNeg = -2
i8Min = math.MinInt8
i16Min = math.MinInt16
i32Min = math.MinInt32
i64Min = math.MinInt64
i24Min = -1 << 23
ct = types.FromDate(2023, 1, 2, 3, 4, 5, 678)
dur = types.Duration{Duration: 123456*time.Microsecond + 7*time.Minute + 8*time.Hour, Fsp: 6}
decZero = types.NewDecFromStringForTest("0.000")
decPos = types.NewDecFromStringForTest("3.14")
decNeg = types.NewDecFromStringForTest("-1.2")
decMin = types.NewMaxOrMinDec(true, 12, 6)
decMax = types.NewMaxOrMinDec(false, 12, 6)
json1 = types.CreateBinaryJSON(nil)
json2 = types.CreateBinaryJSON(int64(42))
json3 = types.CreateBinaryJSON(map[string]interface{}{"foo": "bar", "a": int64(42)})
)
for _, tt := range []struct {
name string
typ *types.FieldType
dat types.Datum
raw []byte
ok bool
}{
{"unspecified", types.NewFieldType(mysql.TypeUnspecified), types.NewDatum(1), nil, false},
{"wrong", types.NewFieldType(42), types.NewDatum(1), nil, false},
{"mismatch/timestamp", types.NewFieldType(mysql.TypeTimestamp), types.NewDatum(1), nil, false},
{"mismatch/datetime", types.NewFieldType(mysql.TypeDatetime), types.NewDatum(1), nil, false},
{"mismatch/date", types.NewFieldType(mysql.TypeDate), types.NewDatum(1), nil, false},
{"mismatch/newdate", types.NewFieldType(mysql.TypeNewDate), types.NewDatum(1), nil, false},
{"mismatch/decimal", types.NewFieldType(mysql.TypeNewDecimal), types.NewDatum(1), nil, false},
{"null", types.NewFieldType(mysql.TypeNull), types.NewDatum(1), nil, true},
{"geometry", types.NewFieldType(mysql.TypeGeometry), types.NewDatum(1), nil, true},
{"tinyint/zero", types.NewFieldType(mysql.TypeTiny), types.NewDatum(intZero), encodeUint64(uint64(intZero)), true},
{"tinyint/pos", types.NewFieldType(mysql.TypeTiny), types.NewDatum(intPos), encodeUint64(uint64(intPos)), true},
{"tinyint/neg", types.NewFieldType(mysql.TypeTiny), types.NewDatum(intNeg), encodeUint64(uint64(intNeg)), true},
{"tinyint/min/signed", types.NewFieldType(mysql.TypeTiny), types.NewDatum(i8Min), encodeUint64(uint64(i8Min)), true},
{"tinyint/max/signed", types.NewFieldType(mysql.TypeTiny), types.NewDatum(math.MaxInt8), encodeUint64(math.MaxInt8), true},
{"tinyint/max/unsigned", types.NewFieldType(mysql.TypeTiny), types.NewDatum(math.MaxUint8), encodeUint64(math.MaxUint8), true},
{"smallint/zero", types.NewFieldType(mysql.TypeShort), types.NewDatum(intZero), encodeUint64(uint64(intZero)), true},
{"smallint/pos", types.NewFieldType(mysql.TypeShort), types.NewDatum(intPos), encodeUint64(uint64(intPos)), true},
{"smallint/neg", types.NewFieldType(mysql.TypeShort), types.NewDatum(intNeg), encodeUint64(uint64(intNeg)), true},
{"smallint/min/signed", types.NewFieldType(mysql.TypeShort), types.NewDatum(i16Min), encodeUint64(uint64(i16Min)), true},
{"smallint/max/signed", types.NewFieldType(mysql.TypeShort), types.NewDatum(math.MaxInt16), encodeUint64(math.MaxInt16), true},
{"smallint/max/unsigned", types.NewFieldType(mysql.TypeShort), types.NewDatum(math.MaxUint16), encodeUint64(math.MaxUint16), true},
{"int/zero", types.NewFieldType(mysql.TypeLong), types.NewDatum(intZero), encodeUint64(uint64(intZero)), true},
{"int/pos", types.NewFieldType(mysql.TypeLong), types.NewDatum(intPos), encodeUint64(uint64(intPos)), true},
{"int/neg", types.NewFieldType(mysql.TypeLong), types.NewDatum(intNeg), encodeUint64(uint64(intNeg)), true},
{"int/min/signed", types.NewFieldType(mysql.TypeLong), types.NewDatum(i32Min), encodeUint64(uint64(i32Min)), true},
{"int/max/signed", types.NewFieldType(mysql.TypeLong), types.NewDatum(math.MaxInt32), encodeUint64(math.MaxInt32), true},
{"int/max/unsigned", types.NewFieldType(mysql.TypeLong), types.NewDatum(math.MaxUint32), encodeUint64(math.MaxUint32), true},
{"bigint/zero", types.NewFieldType(mysql.TypeLonglong), types.NewDatum(intZero), encodeUint64(uint64(intZero)), true},
{"bigint/pos", types.NewFieldType(mysql.TypeLonglong), types.NewDatum(intPos), encodeUint64(uint64(intPos)), true},
{"bigint/neg", types.NewFieldType(mysql.TypeLonglong), types.NewDatum(intNeg), encodeUint64(uint64(intNeg)), true},
{"bigint/min/signed", types.NewFieldType(mysql.TypeLonglong), types.NewDatum(i64Min), encodeUint64(uint64(i64Min)), true},
{"bigint/max/signed", types.NewFieldType(mysql.TypeLonglong), types.NewDatum(math.MaxInt64), encodeUint64(math.MaxInt64), true},
{"bigint/max/unsigned", types.NewFieldType(mysql.TypeLonglong), types.NewDatum(uint64(math.MaxUint64)), encodeUint64(math.MaxUint64), true},
{"mediumint/zero", types.NewFieldType(mysql.TypeInt24), types.NewDatum(intZero), encodeUint64(uint64(intZero)), true},
{"mediumint/pos", types.NewFieldType(mysql.TypeInt24), types.NewDatum(intPos), encodeUint64(uint64(intPos)), true},
{"mediumint/neg", types.NewFieldType(mysql.TypeInt24), types.NewDatum(intNeg), encodeUint64(uint64(intNeg)), true},
{"mediumint/min/signed", types.NewFieldType(mysql.TypeInt24), types.NewDatum(i24Min), encodeUint64(uint64(i24Min)), true},
{"mediumint/max/signed", types.NewFieldType(mysql.TypeInt24), types.NewDatum(1<<23 - 1), encodeUint64(1<<23 - 1), true},
{"mediumint/max/unsigned", types.NewFieldType(mysql.TypeInt24), types.NewDatum(1<<24 - 1), encodeUint64(1<<24 - 1), true},
{"year", types.NewFieldType(mysql.TypeYear), types.NewDatum(2023), encodeUint64(2023), true},
{"varchar", types.NewFieldType(mysql.TypeVarchar), types.NewDatum("foo"), encodeBytes([]byte("foo")), true},
{"varchar/empty", types.NewFieldType(mysql.TypeVarchar), types.NewDatum(""), encodeBytes([]byte{}), true},
{"varbinary", types.NewFieldType(mysql.TypeVarString), types.NewDatum([]byte("foo")), encodeBytes([]byte("foo")), true},
{"varbinary/empty", types.NewFieldType(mysql.TypeVarString), types.NewDatum([]byte("")), encodeBytes([]byte{}), true},
{"char", types.NewFieldType(mysql.TypeString), types.NewDatum("foo"), encodeBytes([]byte("foo")), true},
{"char/empty", types.NewFieldType(mysql.TypeString), types.NewDatum(""), encodeBytes([]byte{}), true},
{"binary", types.NewFieldType(mysql.TypeString), types.NewDatum([]byte("foo")), encodeBytes([]byte("foo")), true},
{"binary/empty", types.NewFieldType(mysql.TypeString), types.NewDatum([]byte("")), encodeBytes([]byte{}), true},
{"text", types.NewFieldType(mysql.TypeBlob), types.NewDatum("foo"), encodeBytes([]byte("foo")), true},
{"text/empty", types.NewFieldType(mysql.TypeBlob), types.NewDatum(""), encodeBytes([]byte{}), true},
{"blob", types.NewFieldType(mysql.TypeBlob), types.NewDatum([]byte("foo")), encodeBytes([]byte("foo")), true},
{"blob/empty", types.NewFieldType(mysql.TypeBlob), types.NewDatum([]byte("")), encodeBytes([]byte{}), true},
{"longtext", types.NewFieldType(mysql.TypeLongBlob), types.NewDatum("foo"), encodeBytes([]byte("foo")), true},
{"longtext/empty", types.NewFieldType(mysql.TypeLongBlob), types.NewDatum(""), encodeBytes([]byte{}), true},
{"longblob", types.NewFieldType(mysql.TypeLongBlob), types.NewDatum([]byte("foo")), encodeBytes([]byte("foo")), true},
{"longblob/empty", types.NewFieldType(mysql.TypeLongBlob), types.NewDatum([]byte("")), encodeBytes([]byte{}), true},
{"mediumtext", types.NewFieldType(mysql.TypeMediumBlob), types.NewDatum("foo"), encodeBytes([]byte("foo")), true},
{"mediumtext/empty", types.NewFieldType(mysql.TypeMediumBlob), types.NewDatum(""), encodeBytes([]byte{}), true},
{"mediumblob", types.NewFieldType(mysql.TypeMediumBlob), types.NewDatum([]byte("foo")), encodeBytes([]byte("foo")), true},
{"mediumblob/empty", types.NewFieldType(mysql.TypeMediumBlob), types.NewDatum([]byte("")), encodeBytes([]byte{}), true},
{"tinytext", types.NewFieldType(mysql.TypeTinyBlob), types.NewDatum("foo"), encodeBytes([]byte("foo")), true},
{"tinytext/empty", types.NewFieldType(mysql.TypeTinyBlob), types.NewDatum(""), encodeBytes([]byte{}), true},
{"tinyblob", types.NewFieldType(mysql.TypeTinyBlob), types.NewDatum([]byte("foo")), encodeBytes([]byte("foo")), true},
{"tinyblob/empty", types.NewFieldType(mysql.TypeTinyBlob), types.NewDatum([]byte("")), encodeBytes([]byte{}), true},
{"float", types.NewFieldType(mysql.TypeFloat), types.NewDatum(float32(3.14)), encodeUint64(math.Float64bits(float64(float32(3.14)))), true},
{"float/nan", types.NewFieldType(mysql.TypeFloat), types.NewDatum(float32(math.NaN())), encodeUint64(math.Float64bits(0)), true},
{"float/+inf", types.NewFieldType(mysql.TypeFloat), types.NewDatum(float32(math.Inf(1))), encodeUint64(math.Float64bits(0)), true},
{"float/-inf", types.NewFieldType(mysql.TypeFloat), types.NewDatum(float32(math.Inf(-1))), encodeUint64(math.Float64bits(0)), true},
{"double", types.NewFieldType(mysql.TypeDouble), types.NewDatum(float64(3.14)), encodeUint64(math.Float64bits(3.14)), true},
{"double/nan", types.NewFieldType(mysql.TypeDouble), types.NewDatum(math.NaN()), encodeUint64(math.Float64bits(0)), true},
{"double/+inf", types.NewFieldType(mysql.TypeDouble), types.NewDatum(math.Inf(1)), encodeUint64(math.Float64bits(0)), true},
{"double/-inf", types.NewFieldType(mysql.TypeDouble), types.NewDatum(math.Inf(-1)), encodeUint64(math.Float64bits(0)), true},
{"enum", types.NewFieldType(mysql.TypeEnum), types.NewDatum(0b010), encodeUint64(0b010), true},
{"set", types.NewFieldType(mysql.TypeSet), types.NewDatum(0b101), encodeUint64(0b101), true},
{"bit", types.NewFieldType(mysql.TypeBit), types.NewBinaryLiteralDatum([]byte{0x12, 0x34}), encodeUint64(0x1234), true},
{"bit/truncate", types.NewFieldType(mysql.TypeBit), types.NewBinaryLiteralDatum([]byte{0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0xff}), encodeUint64(math.MaxUint64), true},
{
"timestamp", types.NewFieldType(mysql.TypeTimestamp),
types.NewTimeDatum(types.NewTime(ct, mysql.TypeTimestamp, 3)),
encodeBytes([]byte(types.NewTime(ct, mysql.TypeTimestamp, 3).String())),
true,
},
{
"timestamp/zero", types.NewFieldType(mysql.TypeTimestamp),
types.NewTimeDatum(types.ZeroTimestamp),
encodeBytes([]byte(types.ZeroTimestamp.String())),
true,
},
{
"timestamp/min", types.NewFieldType(mysql.TypeTimestamp),
types.NewTimeDatum(types.MinTimestamp),
encodeBytes([]byte(types.MinTimestamp.String())),
true,
},
{
"timestamp/max", types.NewFieldType(mysql.TypeTimestamp),
types.NewTimeDatum(types.MaxTimestamp),
encodeBytes([]byte(types.MaxTimestamp.String())),
true,
},
{
"datetime", types.NewFieldType(mysql.TypeDatetime),
types.NewTimeDatum(types.NewTime(ct, mysql.TypeDatetime, 3)),
encodeBytes([]byte(types.NewTime(ct, mysql.TypeDatetime, 3).String())),
true,
},
{
"datetime/zero", types.NewFieldType(mysql.TypeDatetime),
types.NewTimeDatum(types.ZeroDatetime),
encodeBytes([]byte(types.ZeroTimestamp.String())),
true,
},
{
"datetime/min", types.NewFieldType(mysql.TypeDatetime),
types.NewTimeDatum(types.NewTime(types.MinDatetime, mysql.TypeDatetime, 6)),
encodeBytes([]byte(types.NewTime(types.MinDatetime, mysql.TypeDatetime, 6).String())),
true,
},
{
"datetime/max", types.NewFieldType(mysql.TypeDatetime),
types.NewTimeDatum(types.NewTime(types.MaxDatetime, mysql.TypeDatetime, 6)),
encodeBytes([]byte(types.NewTime(types.MaxDatetime, mysql.TypeDatetime, 6).String())),
true,
},
{
"date", types.NewFieldType(mysql.TypeDate),
types.NewTimeDatum(types.NewTime(ct, mysql.TypeDate, 3)),
encodeBytes([]byte(types.NewTime(ct, mysql.TypeDate, 3).String())),
true,
},
{
"date/zero", types.NewFieldType(mysql.TypeDate),
types.NewTimeDatum(types.ZeroDate),
encodeBytes([]byte(types.ZeroDate.String())),
true,
},
{
"date/min",
types.NewFieldType(mysql.TypeDate),
types.NewTimeDatum(types.NewTime(types.MinDatetime, mysql.TypeDate, 6)),
encodeBytes([]byte(types.NewTime(types.MinDatetime, mysql.TypeDate, 6).String())),
true,
},
{
"date/max",
types.NewFieldType(mysql.TypeDate),
types.NewTimeDatum(types.NewTime(types.MaxDatetime, mysql.TypeDate, 6)),
encodeBytes([]byte(types.NewTime(types.MaxDatetime, mysql.TypeDate, 6).String())),
true,
},
{
"newdate", types.NewFieldType(mysql.TypeNewDate),
types.NewTimeDatum(types.NewTime(ct, mysql.TypeNewDate, 3)),
encodeBytes([]byte(types.NewTime(ct, mysql.TypeNewDate, 3).String())),
true,
},
{
"newdate/zero", types.NewFieldType(mysql.TypeNewDate),
types.NewTimeDatum(types.ZeroDate),
encodeBytes([]byte(types.ZeroDate.String())),
true,
},
{
"newdate/min",
types.NewFieldType(mysql.TypeNewDate),
types.NewTimeDatum(types.NewTime(types.MinDatetime, mysql.TypeNewDate, 6)),
encodeBytes([]byte(types.NewTime(types.MinDatetime, mysql.TypeNewDate, 6).String())),
true,
},
{
"newdate/max",
types.NewFieldType(mysql.TypeNewDate),
types.NewTimeDatum(types.NewTime(types.MaxDatetime, mysql.TypeNewDate, 6)),
encodeBytes([]byte(types.NewTime(types.MaxDatetime, mysql.TypeNewDate, 6).String())),
true,
},
{"time", types.NewFieldType(mysql.TypeDuration), types.NewDurationDatum(dur), encodeBytes([]byte(dur.String())), true},
{"time/zero", types.NewFieldType(mysql.TypeDuration), types.NewDurationDatum(types.ZeroDuration), encodeBytes([]byte(types.ZeroDuration.String())), true},
{"time/max", types.NewFieldType(mysql.TypeDuration), types.NewDurationDatum(types.MaxMySQLDuration(3)), encodeBytes([]byte(types.MaxMySQLDuration(3).String())), true},
{"decimal/zero", types.NewFieldType(mysql.TypeNewDecimal), types.NewDecimalDatum(decZero), encodeBytes([]byte(decZero.String())), true},
{"decimal/pos", types.NewFieldType(mysql.TypeNewDecimal), types.NewDecimalDatum(decPos), encodeBytes([]byte(decPos.String())), true},
{"decimal/neg", types.NewFieldType(mysql.TypeNewDecimal), types.NewDecimalDatum(decNeg), encodeBytes([]byte(decNeg.String())), true},
{"decimal/min", types.NewFieldType(mysql.TypeNewDecimal), types.NewDecimalDatum(decMin), encodeBytes([]byte(decMin.String())), true},
{"decimal/max", types.NewFieldType(mysql.TypeNewDecimal), types.NewDecimalDatum(decMax), encodeBytes([]byte(decMax.String())), true},
{"json/1", types.NewFieldType(mysql.TypeJSON), types.NewJSONDatum(json1), encodeBytes([]byte(json1.String())), true},
{"json/2", types.NewFieldType(mysql.TypeJSON), types.NewJSONDatum(json2), encodeBytes([]byte(json2.String())), true},
{"json/3", types.NewFieldType(mysql.TypeJSON), types.NewJSONDatum(json3), encodeBytes([]byte(json3.String())), true},
} {
t.Run(tt.name, func(t *testing.T) {
col := rowcodec.ColData{&model.ColumnInfo{FieldType: *tt.typ}, &tt.dat}
raw, err := col.Encode(buf[:0])
if tt.ok {
require.NoError(t, err)
if len(tt.raw) == 0 {
require.Len(t, raw, 0)
} else {
require.Equal(t, tt.raw, raw)
}
} else {
require.Error(t, err)
}
})
}
t.Run("nulldatum", func(t *testing.T) {
for _, typ := range []byte{
mysql.TypeUnspecified,
mysql.TypeTiny,
mysql.TypeShort,
mysql.TypeLong,
mysql.TypeFloat,
mysql.TypeDouble,
mysql.TypeNull,
mysql.TypeTimestamp,
mysql.TypeLonglong,
mysql.TypeInt24,
mysql.TypeDate,
mysql.TypeDuration,
mysql.TypeDatetime,
mysql.TypeYear,
mysql.TypeNewDate,
mysql.TypeVarchar,
mysql.TypeBit,
mysql.TypeJSON,
mysql.TypeNewDecimal,
mysql.TypeEnum,
mysql.TypeSet,
mysql.TypeTinyBlob,
mysql.TypeMediumBlob,
mysql.TypeLongBlob,
mysql.TypeBlob,
mysql.TypeVarString,
mysql.TypeString,
mysql.TypeGeometry,
42, // wrong type
} {
ft := types.NewFieldType(typ)
dat := types.NewDatum(nil)
col := rowcodec.ColData{&model.ColumnInfo{FieldType: *ft}, &dat}
raw, err := col.Encode(nil)
require.NoError(t, err)
require.Len(t, raw, 0)
}
})
}
func TestRowChecksum(t *testing.T) {
typ1 := types.NewFieldType(mysql.TypeNull)
dat1 := types.NewDatum(nil)
col1 := rowcodec.ColData{&model.ColumnInfo{ID: 1, FieldType: *typ1}, &dat1}
typ2 := types.NewFieldType(mysql.TypeLong)
dat2 := types.NewDatum(42)
col2 := rowcodec.ColData{&model.ColumnInfo{ID: 2, FieldType: *typ2}, &dat2}
typ3 := types.NewFieldType(mysql.TypeVarchar)
dat3 := types.NewDatum("foobar")
col3 := rowcodec.ColData{&model.ColumnInfo{ID: 2, FieldType: *typ3}, &dat3}
buf := make([]byte, 0, 64)
for _, tt := range []struct {
name string
cols []rowcodec.ColData
}{
{"nil", nil},
{"empty", []rowcodec.ColData{}},
{"nullonly", []rowcodec.ColData{col1}},
{"ordered", []rowcodec.ColData{col1, col2, col3}},
{"unordered", []rowcodec.ColData{col3, col1, col2}},
} {
t.Run(tt.name, func(t *testing.T) {
row := rowcodec.RowData{tt.cols, buf}
if !sort.IsSorted(row) {
sort.Sort(row)
}
checksum, err := row.Checksum()
require.NoError(t, err)
raw, err := row.Encode()
require.NoError(t, err)
require.Equal(t, crc32.ChecksumIEEE(raw), checksum)
})
}
}
func TestEncodeDecodeRowWithChecksum(t *testing.T) {
enc := rowcodec.Encoder{}
for _, tt := range []struct {
name string
checksums []uint32
}{
{"NoChecksum", nil},
{"OneChecksum", []uint32{1}},
{"TwoChecksum", []uint32{1, 2}},
{"ThreeChecksum", []uint32{1, 2, 3}},
} {
t.Run(tt.name, func(t *testing.T) {
raw, err := enc.Encode(time.UTC, nil, nil, nil, tt.checksums...)
require.NoError(t, err)
dec := rowcodec.NewDatumMapDecoder([]rowcodec.ColInfo{}, time.UTC)
_, err = dec.DecodeToDatumMap(raw, nil)
require.NoError(t, err)
v1, ok1 := enc.GetChecksum()
v2, ok2 := enc.GetExtraChecksum()
v3, ok3 := dec.GetChecksum()
v4, ok4 := dec.GetExtraChecksum()
if len(tt.checksums) == 0 {
require.False(t, ok1)
require.False(t, ok2)
require.False(t, ok3)
require.False(t, ok4)
} else if len(tt.checksums) == 1 {
require.True(t, ok1)
require.False(t, ok2)
require.True(t, ok3)
require.False(t, ok4)
require.Equal(t, tt.checksums[0], v1)
require.Equal(t, tt.checksums[0], v3)
require.Zero(t, v2)
require.Zero(t, v4)
} else {
require.True(t, ok1)
require.True(t, ok2)
require.True(t, ok3)
require.True(t, ok4)
require.Equal(t, tt.checksums[0], v1)
require.Equal(t, tt.checksums[1], v2)
require.Equal(t, tt.checksums[0], v3)
require.Equal(t, tt.checksums[1], v4)
}
})
}
t.Run("ReuseDecoder", func(t *testing.T) {
dec := rowcodec.NewDatumMapDecoder([]rowcodec.ColInfo{}, time.UTC)
raw1, err := enc.Encode(time.UTC, nil, nil, nil)
require.NoError(t, err)
_, err = dec.DecodeToDatumMap(raw1, nil)
require.NoError(t, err)
v1, ok1 := dec.GetChecksum()
v2, ok2 := dec.GetExtraChecksum()
require.False(t, ok1)
require.False(t, ok2)
require.Zero(t, v1)
require.Zero(t, v2)
raw2, err := enc.Encode(time.UTC, nil, nil, nil, 1, 2)
require.NoError(t, err)
_, err = dec.DecodeToDatumMap(raw2, nil)
require.NoError(t, err)
v1, ok1 = dec.GetChecksum()
v2, ok2 = dec.GetExtraChecksum()
require.True(t, ok1)
require.True(t, ok2)
require.Equal(t, uint32(1), v1)
require.Equal(t, uint32(2), v2)
raw3, err := enc.Encode(time.UTC, nil, nil, nil, 1)
require.NoError(t, err)
_, err = dec.DecodeToDatumMap(raw3, nil)
require.NoError(t, err)
v1, ok1 = dec.GetChecksum()
v2, ok2 = dec.GetExtraChecksum()
require.True(t, ok1)
require.False(t, ok2)
require.Equal(t, uint32(1), v1)
require.Zero(t, v2)
})
}
var (
withUnsigned = func(ft *types.FieldType) *types.FieldType {
ft.AddFlag(mysql.UnsignedFlag)
return ft
}
withEnumElems = func(elem ...string) func(ft *types.FieldType) *types.FieldType {
return func(ft *types.FieldType) *types.FieldType {
ft.SetElems(elem)
return ft
}
}
withFsp = func(fsp int) func(ft *types.FieldType) *types.FieldType {
return func(ft *types.FieldType) *types.FieldType {
ft.SetDecimal(fsp)
return ft
}
}
withFlen = func(flen int) func(ft *types.FieldType) *types.FieldType {
return func(ft *types.FieldType) *types.FieldType {
ft.SetFlen(flen)
return ft
}
}
getDuration = func(value string) types.Duration {
dur, _, _ := types.ParseDuration(types.DefaultStmtNoWarningContext, value, 0)
return dur
}
getOldDatumByte = func(d types.Datum) []byte {
b, err := tablecodec.EncodeValue(nil, nil, d)
if err != nil {
panic(err)
}
return b
}
getDatumPoint = func(d types.Datum) *types.Datum {
return &d
}
withFrac = func(f int) func(d types.Datum) types.Datum {
return func(d types.Datum) types.Datum {
d.SetFrac(f)
return d
}
}
withLen = func(l int) func(d types.Datum) types.Datum {
return func(d types.Datum) types.Datum {
d.SetLength(l)
return d
}
}
)
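// Example (illustrative only, not part of the original test data): the
// helpers above compose to build typed test inputs, e.g. an unsigned field
// type and a decimal datum carrying explicit length/fraction metadata:
//
//	ft := withUnsigned(types.NewFieldType(mysql.TypeLonglong))
//	d := withFrac(2)(withLen(6)(types.NewDecimalDatum(types.NewDecFromStringForTest("12.34"))))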
| pkg/util/rowcodec/rowcodec_test.go | 0 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.0002248880045954138,
0.00017126968305092305,
0.0001608406164450571,
0.00017074411152862012,
0.000006490378837042954
] |
{
"id": 1,
"code_window": [
"\tast.SetVar: {},\n",
"\tast.GetVar: {},\n",
"\tast.ReleaseAllLocks: {},\n",
"}\n",
"\n",
"// DeferredFunctions stores functions which are foldable but should be deferred as well when plan cache is enabled.\n",
"// Note that, these functions must be foldable at first place, i.e, they are not in `unFoldableFunctions`.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"// IsDeferredFunctions checks whether the function is in DeferredFunctions.\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 135
} | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/pkg/config"
"github.com/pingcap/tidb/pkg/executor/internal/exec"
"github.com/pingcap/tidb/pkg/parser/ast"
"github.com/pingcap/tidb/pkg/tidb-binlog/node"
"github.com/pingcap/tidb/pkg/util/chunk"
)
// ChangeExec represents a change executor.
type ChangeExec struct {
exec.BaseExecutor
*ast.ChangeStmt
}
// Next implements the Executor Next interface.
func (e *ChangeExec) Next(ctx context.Context, _ *chunk.Chunk) error {
kind := strings.ToLower(e.NodeType)
urls := config.GetGlobalConfig().Path
registry, needToClose, err := getOrCreateBinlogRegistry(urls)
if err != nil {
return err
}
if needToClose {
defer func() {
_ = registry.Close()
}()
}
nodes, _, err := registry.Nodes(ctx, node.NodePrefix[kind])
if err != nil {
return err
}
state := e.State
nodeID := e.NodeID
for _, n := range nodes {
if n.NodeID != nodeID {
continue
}
switch state {
case node.Online, node.Pausing, node.Paused, node.Closing, node.Offline:
n.State = state
return registry.UpdateNode(ctx, node.NodePrefix[kind], n)
default:
return errors.Errorf("state %s is illegal", state)
}
}
return errors.NotFoundf("node %s, id %s from etcd %s", kind, nodeID, urls)
}
| pkg/executor/change.go | 0 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.0002429125743219629,
0.00018046742479782552,
0.00016661861445754766,
0.00017101963749155402,
0.000025715971787576564
] |
{
"id": 1,
"code_window": [
"\tast.SetVar: {},\n",
"\tast.GetVar: {},\n",
"\tast.ReleaseAllLocks: {},\n",
"}\n",
"\n",
"// DeferredFunctions stores functions which are foldable but should be deferred as well when plan cache is enabled.\n",
"// Note that, these functions must be foldable at first place, i.e, they are not in `unFoldableFunctions`.\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep"
],
"after_edit": [
"// IsDeferredFunctions checks whether the function is in DeferredFunctions.\n"
],
"file_path": "pkg/expression/function_traits.go",
"type": "add",
"edit_start_line_idx": 135
} | // Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package charset
import (
"bytes"
"fmt"
"reflect"
"strings"
"unsafe"
"github.com/pingcap/tidb/pkg/parser/mysql"
"github.com/pingcap/tidb/pkg/parser/terror"
"golang.org/x/text/encoding"
"golang.org/x/text/transform"
)
// ErrInvalidCharacterString is returned when a string is invalid for the specific charset.
var ErrInvalidCharacterString = terror.ClassParser.NewStd(mysql.ErrInvalidCharacterString)
// encodingBase defines some generic functions.
type encodingBase struct {
enc encoding.Encoding
self Encoding
}
func (encodingBase) MbLen(_ string) int {
return 0
}
func (encodingBase) ToUpper(src string) string {
return strings.ToUpper(src)
}
func (encodingBase) ToLower(src string) string {
return strings.ToLower(src)
}
func (b encodingBase) IsValid(src []byte) bool {
isValid := true
b.self.Foreach(src, opFromUTF8, func(from, to []byte, ok bool) bool {
isValid = ok
return ok
})
return isValid
}
func (b encodingBase) Transform(dest *bytes.Buffer, src []byte, op Op) (result []byte, err error) {
if dest == nil {
dest = &bytes.Buffer{}
dest.Grow(len(src))
}
dest.Reset()
b.self.Foreach(src, op, func(from, to []byte, ok bool) bool {
if !ok {
if err == nil && (op&opSkipError == 0) {
err = generateEncodingErr(b.self.Name(), from)
}
if op&opTruncateTrim != 0 {
return false
}
if op&opTruncateReplace != 0 {
dest.WriteByte('?')
return true
}
}
if op&opCollectFrom != 0 {
dest.Write(from)
} else if op&opCollectTo != 0 {
dest.Write(to)
}
return true
})
return dest.Bytes(), err
}
func (b encodingBase) Foreach(src []byte, op Op, fn func(from, to []byte, ok bool) bool) {
var tfm transform.Transformer
var peek func([]byte) []byte
if op&opFromUTF8 != 0 {
tfm = b.enc.NewEncoder()
peek = EncodingUTF8Impl.Peek
} else {
tfm = b.enc.NewDecoder()
peek = b.self.Peek
}
var buf [4]byte
for i, w := 0, 0; i < len(src); i += w {
w = len(peek(src[i:]))
nDst, _, err := tfm.Transform(buf[:], src[i:i+w], false)
meetErr := err != nil || (op&opToUTF8 != 0 && beginWithReplacementChar(buf[:nDst]))
if !fn(src[i:i+w], buf[:nDst], !meetErr) {
return
}
}
}
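// To illustrate how the Op flags combine in Transform above (a sketch only;
// the op constants are package-internal, so code like this can live only in
// this package):
//
//	// decode src to UTF-8, keep the decoded bytes, replace bad input with '?'
//	dst, err := b.Transform(nil, src, opToUTF8|opCollectTo|opTruncateReplace)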
// replacementBytes are bytes for the replacement rune 0xfffd.
var replacementBytes = []byte{0xEF, 0xBF, 0xBD}
// beginWithReplacementChar checks if dst has the prefix '0xEFBFBD'.
func beginWithReplacementChar(dst []byte) bool {
return bytes.HasPrefix(dst, replacementBytes)
}
// generateEncodingErr generates an invalid string in charset error.
func generateEncodingErr(name string, invalidBytes []byte) error {
arg := fmt.Sprintf("%X", invalidBytes)
return ErrInvalidCharacterString.FastGenByArgs(name, arg)
}
// HackSlice converts string to slice without copy.
// Use at your own risk.
func HackSlice(s string) (b []byte) {
pBytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pString := (*reflect.StringHeader)(unsafe.Pointer(&s))
pBytes.Data = pString.Data
pBytes.Len = pString.Len
pBytes.Cap = pString.Len
return
}
// HackString converts slice to string without copy.
// Use it at your own risk.
func HackString(b []byte) (s string) {
if len(b) == 0 {
return ""
}
pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
pstring.Data = pbytes.Data
pstring.Len = pbytes.Len
return
}
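// Example usage of the zero-copy helpers above (illustrative only). The
// returned values alias their inputs, so mutating b here would also change
// the bytes observed through s and s2:
//
//	s := "abc"
//	b := HackSlice(s)   // []byte view over s's bytes, no allocation
//	s2 := HackString(b) // string view over the same bytes, no allocation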
| pkg/parser/charset/encoding_base.go | 0 | https://github.com/pingcap/tidb/commit/f72e1d966ce316f6f8b391d8f3e328210f04c56f | [
0.00032925562118180096,
0.00018554534472059458,
0.00016559765208512545,
0.00017102909623645246,
0.00004185786747257225
] |
{
"id": 0,
"code_window": [
"\t\tIt(name, func() {\n",
"\t\t\tnodePreparer := framework.NewE2ETestNodePreparer(\n",
"\t\t\t\tf.ClientSet,\n",
"\t\t\t\tmap[int]testutils.PrepareNodeStrategy{nodeCount: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\t)\n",
"\t\t\tframework.ExpectNoError(nodePreparer.PrepareNodes())\n",
"\t\t\tdefer nodePreparer.CleanupNodes()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/e2e/density.go",
"type": "replace",
"edit_start_line_idx": 465
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
retries = 5
)
type IntegrationTestNodePreparer struct {
client clientset.Interface
countToStrategy map[int]testutils.PrepareNodeStrategy
nodeNamePrefix string
}
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
return &IntegrationTestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeNamePrefix: nodeNamePrefix,
}
}
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes := 0
for k := range p.countToStrategy {
numNodes += k
}
glog.Infof("Making %d nodes", numNodes)
baseNode := &api.Node{
ObjectMeta: api.ObjectMeta{
GenerateName: p.nodeNamePrefix,
},
Spec: api.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
api.ResourceCPU: resource.MustParse("4"),
api.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: api.NodeRunning,
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
},
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
index := 0
sum := 0
for k, strategy := range p.countToStrategy {
sum += k
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
}
}
return nil
}
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
}
| test/integration/framework/perf_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.007335940841585398,
0.0010578591609373689,
0.00016555249749217182,
0.00017376866890117526,
0.0020467538852244616
] |
{
"id": 0,
"code_window": [
"\t\tIt(name, func() {\n",
"\t\t\tnodePreparer := framework.NewE2ETestNodePreparer(\n",
"\t\t\t\tf.ClientSet,\n",
"\t\t\t\tmap[int]testutils.PrepareNodeStrategy{nodeCount: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\t)\n",
"\t\t\tframework.ExpectNoError(nodePreparer.PrepareNodes())\n",
"\t\t\tdefer nodePreparer.CleanupNodes()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/e2e/density.go",
"type": "replace",
"edit_start_line_idx": 465
} | #!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is intended to start docker and then loop until
# it detects a failure. It then exits, and supervisord restarts it
# which in turn restarts docker.
main() {
if ! healthy 60; then
stop_docker
start_docker
echo "waiting 30s for startup"
sleep 30
healthy 60
fi
while healthy; do
sleep 10
done
echo "Docker failed!"
exit 2
}
# Performs health check on docker. If a parameter is passed, it is treated as
# the number of seconds to keep trying for a healthy result. If none is passed
# we make only one attempt.
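# For example: `healthy` makes a single check, while `healthy 60` keeps
# retrying for up to roughly 60 seconds before giving up.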
healthy() {
max_retry_sec="$1"
shift
starttime=$(date +%s)
while ! timeout 60 docker ps > /dev/null; do
if [[ -z "$max_retry_sec" || $(( $(date +%s) - starttime )) -gt "$max_retry_sec" ]]; then
echo "docker ps did not succeed"
return 2
else
echo "waiting 5s before retry"
sleep 5
fi
done
echo "docker is healthy"
return 0
}
stop_docker() {
/etc/init.d/docker stop
  # Make sure docker has terminated gracefully before it is started again
starttime=`date +%s`
while pidof docker > /dev/null; do
currenttime=`date +%s`
((elapsedtime = currenttime - starttime))
# after 60 seconds, forcefully terminate docker process
if test $elapsedtime -gt 60; then
echo "attempting to kill docker process with sigkill signal"
kill -9 `pidof docker` || sleep 10
else
echo "waiting clean shutdown"
sleep 10
fi
done
}
start_docker() {
echo "docker is not running. starting docker"
# cleanup docker network checkpoint to avoid running into known issue
# of docker (https://github.com/docker/docker/issues/18283)
rm -rf /var/lib/docker/network
/etc/init.d/docker start
}
main
| cluster/saltbase/salt/supervisor/docker-checker.sh | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017644456238485873,
0.00017225550254806876,
0.0001613669010112062,
0.00017297649173997343,
0.000004221490144118434
] |
{
"id": 0,
"code_window": [
"\t\tIt(name, func() {\n",
"\t\t\tnodePreparer := framework.NewE2ETestNodePreparer(\n",
"\t\t\t\tf.ClientSet,\n",
"\t\t\t\tmap[int]testutils.PrepareNodeStrategy{nodeCount: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\t)\n",
"\t\t\tframework.ExpectNoError(nodePreparer.PrepareNodes())\n",
"\t\t\tdefer nodePreparer.CleanupNodes()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/e2e/density.go",
"type": "replace",
"edit_start_line_idx": 465
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package runtime includes helper functions for working with API objects
// that follow the kubernetes API object conventions, which are:
//
// 0. Your API objects have a common metadata struct member, TypeMeta.
// 1. Your code refers to an internal set of API objects.
// 2. In a separate package, you have an external set of API objects.
// 3. The external set is considered to be versioned, and no breaking
// changes are ever made to it (fields may be added but not changed
// or removed).
// 4. As your api evolves, you'll make an additional versioned package
// with every major change.
// 5. Versioned packages have conversion functions which convert to
// and from the internal version.
// 6. You'll continue to support older versions according to your
// deprecation policy, and you can easily provide a program/library
// to update old versions into new versions because of 5.
// 7. All of your serializations and deserializations are handled in a
// centralized place.
//
// Package runtime provides a conversion helper to make 5 easy, and the
// Encode/Decode/DecodeInto trio to accomplish 7. You can also register
// additional "codecs" which use a version of your choice. It's
// recommended that you register your types with runtime in your
// package's init function.
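//
// For example, a package might register its types in an init function (an
// illustrative sketch; MyType and SchemeGroupVersion are hypothetical names,
// not part of this package):
//
//	func init() {
//		api.Scheme.AddKnownTypes(SchemeGroupVersion, &MyType{}, &MyTypeList{})
//	}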
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
package runtime // import "k8s.io/kubernetes/pkg/runtime"
| pkg/runtime/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017854824545793235,
0.00017085441504605114,
0.0001629898470127955,
0.00016998581122606993,
0.000005245351985649904
] |
{
"id": 0,
"code_window": [
"\t\tIt(name, func() {\n",
"\t\t\tnodePreparer := framework.NewE2ETestNodePreparer(\n",
"\t\t\t\tf.ClientSet,\n",
"\t\t\t\tmap[int]testutils.PrepareNodeStrategy{nodeCount: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\t)\n",
"\t\t\tframework.ExpectNoError(nodePreparer.PrepareNodes())\n",
"\t\t\tdefer nodePreparer.CleanupNodes()\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/e2e/density.go",
"type": "replace",
"edit_start_line_idx": 465
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = ["etcd.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/errors:go_default_library",
"//pkg/api/errors/storage:go_default_library",
"//pkg/api/rest:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/registry/cachesize:go_default_library",
"//pkg/registry/core/namespace:go_default_library",
"//pkg/registry/generic:go_default_library",
"//pkg/registry/generic/registry:go_default_library",
"//pkg/runtime:go_default_library",
"//pkg/storage:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["etcd_test.go"],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/unversioned:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/labels:go_default_library",
"//pkg/registry/generic:go_default_library",
"//pkg/registry/registrytest:go_default_library",
"//pkg/storage/etcd/etcdtest:go_default_library",
"//pkg/storage/etcd/testing:go_default_library",
],
)
| pkg/registry/core/namespace/etcd/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017682639008853585,
0.00017373412265442312,
0.00017036747885867953,
0.0001728352071950212,
0.000002353918262087973
] |
{
"id": 1,
"code_window": [
"\tclient clientset.Interface\n",
"\t// Specifies how many nodes should be modified using the given strategy.\n",
"\t// Only one strategy can be applied to a single Node, so there needs to\n",
"\t// be at least <sum_of_keys> Nodes in the cluster.\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"fmt"
"math"
"testing"
"time"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
threshold3K = 100
threshold30K = 30
threshold60K = 30
)
// TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
func TestSchedule100Node3KPods(t *testing.T) {
if testing.Short() {
t.Skip("Skipping because we want to run short tests")
}
if min := schedulePods(100, 3000); min < threshold3K {
t.Errorf("To small pod scheduling throughput for 3k pods. Expected %v got %v", threshold3K, min)
} else {
fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
}
}
// TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes.
func TestSchedule1000Node30KPods(t *testing.T) {
if testing.Short() {
t.Skip("Skipping because we want to run short tests")
}
if min := schedulePods(1000, 30000); min < threshold30K {
t.Errorf("To small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
} else {
fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
}
}
// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
// This test won't fit in a normal 10-minute time window.
// func TestSchedule2000Node60KPods(t *testing.T) {
// 	if testing.Short() {
// 		t.Skip("Skipping because we want to run short tests")
// 	}
// 	if min := schedulePods(2000, 60000); min < threshold60K {
// 		t.Errorf("Too small pod scheduling throughput for 60k pods. Expected %v, got %v", threshold60K, min)
// 	} else {
// 		fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
// 	}
// }
// schedulePods schedules a specific number of pods on a specific number of nodes.
// This is used to learn the scheduling throughput on various cluster sizes and
// how it changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
// It returns the minimum observed throughput over the whole run.
func schedulePods(numNodes, numPods int) int32 {
schedulerConfigFactory, destroyFunc := mustSetupScheduler()
defer destroyFunc()
c := schedulerConfigFactory.Client
nodePreparer := framework.NewIntegrationTestNodePreparer(
c,
map[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},
"scheduler-perf-",
)
if err := nodePreparer.PrepareNodes(); err != nil {
glog.Fatalf("%v", err)
}
defer nodePreparer.CleanupNodes()
makePodsFromRC(c, "rc1", numPods)
prev := 0
minQps := int32(math.MaxInt32)
start := time.Now()
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
if len(scheduled) >= numPods {
fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average).\n",
numPods, int(time.Since(start)/time.Second), numPods/int(time.Since(start)/time.Second))
return minQps
}
// There's no point in printing it for the last iteration, as the value is random
qps := len(scheduled) - prev
if int32(qps) < minQps {
minQps = int32(qps)
}
fmt.Printf("%ds\trate: %d\ttotal: %d\n", time.Since(start)/time.Second, qps, len(scheduled))
prev = len(scheduled)
time.Sleep(1 * time.Second)
}
}
| test/integration/scheduler_perf/scheduler_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.013923096470534801,
0.0013626228319481015,
0.00016790135123301297,
0.00018610418192110956,
0.0037877869326621294
] |
{
"id": 1,
"code_window": [
"\tclient clientset.Interface\n",
"\t// Specifies how many nodes should be modified using the given strategy.\n",
"\t// Only one strategy can be applied to a single Node, so there needs to\n",
"\t// be at least <sum_of_keys> Nodes in the cluster.\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1000
} | #!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script assumes that the environment variable SERVER_BINARY_TAR contains
# the release tar to download and unpack. It is meant to be pushed to the
# master and run.
echo "Unpacking Salt tree"
rm -rf kubernetes
tar xzf "${SALT_TAR}"
echo "Running release install script"
sudo kubernetes/saltbase/install.sh "${SERVER_BINARY_TAR}"
| cluster/photon-controller/templates/install-release.sh | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017796321481000632,
0.0001753252581693232,
0.00017131325148511678,
0.0001766992936609313,
0.0000028834576824010583
] |
{
"id": 1,
"code_window": [
"\tclient clientset.Interface\n",
"\t// Specifies how many nodes should be modified using the given strategy.\n",
"\t// Only one strategy can be applied to a single Node, so there needs to\n",
"\t// be at least <sum_of_keys> Nodes in the cluster.\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1000
} | //
// Copyright 2016, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cloudstack
import (
"encoding/json"
"net/url"
)
type AddStratosphereSspParams struct {
p map[string]interface{}
}
func (p *AddStratosphereSspParams) toURLValues() url.Values {
u := url.Values{}
if p.p == nil {
return u
}
if v, found := p.p["name"]; found {
u.Set("name", v.(string))
}
if v, found := p.p["password"]; found {
u.Set("password", v.(string))
}
if v, found := p.p["tenantuuid"]; found {
u.Set("tenantuuid", v.(string))
}
if v, found := p.p["url"]; found {
u.Set("url", v.(string))
}
if v, found := p.p["username"]; found {
u.Set("username", v.(string))
}
if v, found := p.p["zoneid"]; found {
u.Set("zoneid", v.(string))
}
return u
}
func (p *AddStratosphereSspParams) SetName(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["name"] = v
return
}
func (p *AddStratosphereSspParams) SetPassword(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["password"] = v
return
}
func (p *AddStratosphereSspParams) SetTenantuuid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["tenantuuid"] = v
return
}
func (p *AddStratosphereSspParams) SetUrl(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["url"] = v
return
}
func (p *AddStratosphereSspParams) SetUsername(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["username"] = v
return
}
func (p *AddStratosphereSspParams) SetZoneid(v string) {
if p.p == nil {
p.p = make(map[string]interface{})
}
p.p["zoneid"] = v
return
}
// You should always use this function to get a new AddStratosphereSspParams instance,
// as it guarantees that all required params are configured
func (s *StratosphereSSPService) NewAddStratosphereSspParams(name string, url string, zoneid string) *AddStratosphereSspParams {
p := &AddStratosphereSspParams{}
p.p = make(map[string]interface{})
p.p["name"] = name
p.p["url"] = url
p.p["zoneid"] = zoneid
return p
}
// Adds stratosphere ssp server
func (s *StratosphereSSPService) AddStratosphereSsp(p *AddStratosphereSspParams) (*AddStratosphereSspResponse, error) {
resp, err := s.cs.newRequest("addStratosphereSsp", p.toURLValues())
if err != nil {
return nil, err
}
var r AddStratosphereSspResponse
if err := json.Unmarshal(resp, &r); err != nil {
return nil, err
}
return &r, nil
}
type AddStratosphereSspResponse struct {
Hostid string `json:"hostid,omitempty"`
Name string `json:"name,omitempty"`
Url string `json:"url,omitempty"`
Zoneid string `json:"zoneid,omitempty"`
}
| vendor/github.com/xanzy/go-cloudstack/cloudstack/StratosphereSSPService.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0008915947400964797,
0.00029164834995754063,
0.0001642186543904245,
0.0002048945752903819,
0.00019756521214731038
] |
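The generated CloudStack service above follows a params-builder pattern: construct the params with the New* helper (which forces the required arguments), set optional fields via Set* methods, then invoke the call. A hedged usage sketch; NewAsyncClient is the real xanzy/go-cloudstack constructor, but the endpoint, keys, and the StratosphereSSP field name on the client are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/xanzy/go-cloudstack/cloudstack"
)

func main() {
	// Illustrative endpoint and credentials.
	cs := cloudstack.NewAsyncClient("https://cloud.example.com/client/api", "apiKey", "secretKey", true)

	// Required params go through the constructor; optional ones through setters.
	p := cs.StratosphereSSP.NewAddStratosphereSspParams("ssp-1", "https://ssp.example.com", "zone-uuid")
	p.SetUsername("admin")
	p.SetPassword("secret")

	resp, err := cs.StratosphereSSP.AddStratosphereSsp(p)
	if err != nil {
		fmt.Println("addStratosphereSsp failed:", err)
		return
	}
	fmt.Println("registered SSP host:", resp.Hostid)
}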
{
"id": 1,
"code_window": [
"\tclient clientset.Interface\n",
"\t// Specifies how many nodes should be modified using the given strategy.\n",
"\t// Only one strategy can be applied to a single Node, so there needs to\n",
"\t// be at least <sum_of_keys> Nodes in the cluster.\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This package is generated by client-gen with arguments: --clientset-name=federation_internalclientset --clientset-path=k8s.io/kubernetes/federation/client/clientset_generated --included-types-overrides=[api/Service,api/Namespace,extensions/ReplicaSet,api/Secret,extensions/Ingress,extensions/Deployment,extensions/DaemonSet,api/ConfigMap,api/Event] --input=[../../federation/apis/federation/,api/,extensions/]
// This package has the automatically generated typed clients.
package unversioned
| federation/client/clientset_generated/federation_internalclientset/typed/extensions/unversioned/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017915073840413243,
0.00017367477994412184,
0.00017029762966558337,
0.00017157594265881926,
0.000003907103746314533
] |
{
"id": 2,
"code_window": [
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n",
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy) testutils.TestNodePreparer {\n",
"\treturn &E2ETestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1004
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"math"
"os"
"time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
"github.com/golang/glog"
)
const (
// String used to mark pod deletion
nonExist = "NonExist"
)
type RCConfig struct {
Client *client.Client
Image string
Command []string
Name string
Namespace string
PollInterval time.Duration
Timeout time.Duration
PodStatusFile *os.File
Replicas int
CpuRequest int64 // millicores
CpuLimit int64 // millicores
MemRequest int64 // bytes
MemLimit int64 // bytes
ReadinessProbe *api.Probe
DNSPolicy *api.DNSPolicy
// Env vars, set the same for every pod.
Env map[string]string
// Extra labels added to every pod.
Labels map[string]string
// Node selector for pods in the RC.
NodeSelector map[string]string
// Ports to declare in the container (map of name to containerPort).
Ports map[string]int
// Ports to declare in the container as host and container ports.
HostPorts map[string]int
Volumes []api.Volume
VolumeMounts []api.VolumeMount
// Pointer to a list of pods; if non-nil, will be set to a list of pods
// created by this RC by RunRC.
CreatedPods *[]*api.Pod
// Maximum allowable container failures. If exceeded, RunRC returns an error.
// Defaults to replicas*0.1 if unspecified.
MaxContainerFailures *int
// If set to false, starting the RC will print progress; otherwise only errors will be printed.
Silent bool
// If set, this function will be used to print log lines instead of glog.
LogFunc func(fmt string, args ...interface{})
// If set, those functions will be used to gather data from Nodes - in integration tests where no
// kubelets are running, those variables should be nil.
NodeDumpFunc func(c *client.Client, nodeNames []string, logFunc func(fmt string, args ...interface{}))
ContainerDumpFunc func(c *client.Client, ns string, logFunc func(ftm string, args ...interface{}))
}
func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
if rc.LogFunc != nil {
rc.LogFunc(fmt, args...)
}
glog.Infof(fmt, args...)
}
type DeploymentConfig struct {
RCConfig
}
type ReplicaSetConfig struct {
RCConfig
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
oldHostname string
oldPhase string
hostname string
phase string
}
// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// String formats the given PodDiff and returns it as a string.
func (p PodDiff) String(ignorePhases sets.String) string {
ret := ""
for name, info := range p {
if ignorePhases.Has(info.phase) {
continue
}
if info.phase == nonExist {
ret += fmt.Sprintf("Pod %v was deleted, had phase %v and host %v\n", name, info.oldPhase, info.oldHostname)
continue
}
phaseChange, hostChange := false, false
msg := fmt.Sprintf("Pod %v ", name)
if info.oldPhase != info.phase {
phaseChange = true
if info.oldPhase == nonExist {
msg += fmt.Sprintf("in phase %v ", info.phase)
} else {
msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
}
}
if info.oldHostname != info.hostname {
hostChange = true
if info.oldHostname == nonExist || info.oldHostname == "" {
msg += fmt.Sprintf("assigned host %v ", info.hostname)
} else {
msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
}
}
if phaseChange || hostChange {
ret += msg + "\n"
}
}
return ret
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
podInfoMap := PodDiff{}
// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
for _, pod := range curPods {
podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.NodeName, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist}
}
// Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist.
for _, pod := range oldPods {
if info, ok := podInfoMap[pod.Name]; ok {
info.oldHostname, info.oldPhase = pod.Spec.NodeName, string(pod.Status.Phase)
} else {
podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.NodeName, oldPhase: string(pod.Status.Phase)}
}
}
return podInfoMap
}
// RunDeployment launches (and verifies the correctness of) a Deployment
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *DeploymentConfig) create() error {
deployment := &extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.DeploymentSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&deployment.Spec.Template)
_, err := config.Client.Deployments(config.Namespace).Create(deployment)
if err != nil {
return fmt.Errorf("Error creating deployment: %v", err)
}
config.RCConfigLog("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
return nil
}
// RunReplicaSet launches (and verifies the correctness of) a ReplicaSet
// and waits until all the pods it launches reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *ReplicaSetConfig) create() error {
rs := &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&rs.Spec.Template)
_, err := config.Client.ReplicaSets(config.Namespace).Create(rs)
if err != nil {
return fmt.Errorf("Error creating replica set: %v", err)
}
config.RCConfigLog("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, rs.Spec.Replicas)
return nil
}
// RunRC launches (and verifies the correctness of) a Replication Controller
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *RCConfig) create() error {
dnsDefault := api.DNSDefault
if config.DNSPolicy == nil {
config.DNSPolicy = &dnsDefault
}
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: api.ReplicationControllerSpec{
Replicas: int32(config.Replicas),
Selector: map[string]string{
"name": config.Name,
},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
ReadinessProbe: config.ReadinessProbe,
},
},
DNSPolicy: *config.DNSPolicy,
NodeSelector: config.NodeSelector,
},
},
},
}
config.applyTo(rc.Spec.Template)
_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
if err != nil {
return fmt.Errorf("Error creating replication controller: %v", err)
}
config.RCConfigLog("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
return nil
}
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
if config.Env != nil {
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
}
}
if config.Labels != nil {
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
}
if config.NodeSelector != nil {
template.Spec.NodeSelector = make(map[string]string)
for k, v := range config.NodeSelector {
template.Spec.NodeSelector[k] = v
}
}
if config.Ports != nil {
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
}
}
if config.HostPorts != nil {
for k, v := range config.HostPorts {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
}
if config.CpuLimit > 0 || config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
}
if config.CpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
}
if config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
if config.CpuRequest > 0 || config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
}
if config.CpuRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
}
if config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
}
if len(config.Volumes) > 0 {
template.Spec.Volumes = config.Volumes
}
if len(config.VolumeMounts) > 0 {
template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
}
}
type RCStartupStatus struct {
Expected int
Terminating int
Running int
RunningButNotReady int
Waiting int
Pending int
Unknown int
Inactive int
FailedContainers int
Created []*api.Pod
ContainerRestartNodes sets.String
}
func (s *RCStartupStatus) String(name string) string {
return fmt.Sprintf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
name, len(s.Created), s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}
func ComputeRCStartupStatus(pods []*api.Pod, expected int) RCStartupStatus {
startupStatus := RCStartupStatus{
Expected: expected,
Created: make([]*api.Pod, 0, expected),
ContainerRestartNodes: sets.NewString(),
}
for _, p := range pods {
if p.DeletionTimestamp != nil {
startupStatus.Terminating++
continue
}
startupStatus.Created = append(startupStatus.Created, p)
if p.Status.Phase == api.PodRunning {
ready := false
for _, c := range p.Status.Conditions {
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
ready = true
break
}
}
if ready {
// Only count a pod as running when it is also ready.
startupStatus.Running++
} else {
startupStatus.RunningButNotReady++
}
for _, v := range FailedContainers(p) {
startupStatus.FailedContainers = startupStatus.FailedContainers + v.Restarts
startupStatus.ContainerRestartNodes.Insert(p.Spec.NodeName)
}
} else if p.Status.Phase == api.PodPending {
if p.Spec.NodeName == "" {
startupStatus.Waiting++
} else {
startupStatus.Pending++
}
} else if p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed {
startupStatus.Inactive++
} else if p.Status.Phase == api.PodUnknown {
startupStatus.Unknown++
}
}
return startupStatus
}
func (config *RCConfig) start() error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
} else {
maxContainerFailures = *config.MaxContainerFailures
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
PodStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything())
defer PodStore.Stop()
interval := config.PollInterval
if interval <= 0 {
interval = 10 * time.Second
}
timeout := config.Timeout
if timeout <= 0 {
timeout = 5 * time.Minute
}
oldPods := make([]*api.Pod, 0)
oldRunning := 0
lastChange := time.Now()
for oldRunning != config.Replicas {
time.Sleep(interval)
pods := PodStore.List()
startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
pods = startupStatus.Created
if config.CreatedPods != nil {
*config.CreatedPods = pods
}
if !config.Silent {
config.RCConfigLog(startupStatus.String(config.Name))
}
if config.PodStatusFile != nil {
fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", startupStatus.Running, startupStatus.Pending, startupStatus.Waiting, startupStatus.Inactive, startupStatus.Unknown, startupStatus.RunningButNotReady)
}
if startupStatus.FailedContainers > maxContainerFailures {
if config.NodeDumpFunc != nil {
config.NodeDumpFunc(config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog)
}
if config.ContainerDumpFunc != nil {
// Get the logs from the failed containers to help diagnose what caused them to fail
config.ContainerDumpFunc(config.Client, config.Namespace, config.RCConfigLog)
}
return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
}
if len(pods) < len(oldPods) || len(pods) > config.Replicas {
// This failure mode includes:
// kubelet is dead, so node controller deleted pods and rc creates more
// - diagnose by noting the pod diff below.
// pod is unhealthy, so replication controller creates another to take its place
// - diagnose by comparing the previous "2 Pod states" lines for inactive pods
errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
config.RCConfigLog("%v, pods that changed since the last iteration:", errorStr)
config.RCConfigLog(Diff(oldPods, pods).String(sets.NewString()))
return fmt.Errorf(errorStr)
}
if len(pods) > len(oldPods) || startupStatus.Running > oldRunning {
lastChange = time.Now()
}
oldPods = pods
oldRunning = startupStatus.Running
if time.Since(lastChange) > timeout {
break
}
}
if oldRunning != config.Replicas {
// List only pods from a given replication controller.
options := api.ListOptions{LabelSelector: label}
if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
for _, pod := range pods.Items {
config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
}
} else {
config.RCConfigLog("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
}
return nil
}
// Simplified version of RunRC that does not create an RC, but creates plain Pods instead.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string,
pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
// no pod to start
if replicas < 1 {
panic("StartPods: number of replicas must be non-zero")
}
startPodsID := string(uuid.NewUUID()) // So that we can label and find them
for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Labels["name"] = podName
pod.ObjectMeta.Labels["startPodsID"] = startPodsID
pod.Spec.Containers[0].Name = podName
_, err := c.Pods(namespace).Create(&pod)
if err != nil {
return err
}
}
logFunc("Waiting for running...")
if waitForRunning {
label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
err := WaitForPodsWithLabelRunning(c, namespace, label)
if err != nil {
return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
}
}
return nil
}
// Wait up to 10 minutes for all matching pods to become Running and for at
// least one matching pod to exist.
func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
running := false
PodStore := NewPodStore(c, ns, label, fields.Everything())
defer PodStore.Stop()
waitLoop:
for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
pods := PodStore.List()
if len(pods) == 0 {
continue waitLoop
}
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
continue waitLoop
}
}
running = true
break
}
if !running {
return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
}
return nil
}
type TestNodePreparer interface {
PrepareNodes() error
CleanupNodes() error
}
type PrepareNodeStrategy interface {
PreparePatch(node *api.Node) []byte
CleanupNode(node *api.Node) *api.Node
}
type TrivialNodePrepareStrategy struct{}
func (*TrivialNodePrepareStrategy) PreparePatch(*api.Node) []byte {
return []byte{}
}
func (*TrivialNodePrepareStrategy) CleanupNode(node *api.Node) *api.Node {
nodeCopy := *node
return &nodeCopy
}
func DoPrepareNode(client clientset.Interface, node *api.Node, strategy PrepareNodeStrategy) error {
var err error
patch := strategy.PreparePatch(node)
if len(patch) == 0 {
return nil
}
for attempt := 0; attempt < retries; attempt++ {
if _, err = client.Core().Nodes().Patch(node.Name, api.MergePatchType, []byte(patch)); err == nil {
return nil
}
if !apierrs.IsConflict(err) {
return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("To many conflicts when applying patch %v to Node %v", string(patch), node.Name)
}
func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
for attempt := 0; attempt < retries; attempt++ {
node, err := client.Core().Nodes().Get(nodeName)
if err != nil {
return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
}
updatedNode := strategy.CleanupNode(node)
if api.Semantic.DeepEqual(node, updatedNode) {
return nil
}
if _, err = client.Core().Nodes().Update(updatedNode); err == nil {
return nil
}
if !apierrs.IsConflict(err) {
return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("To many conflicts when trying to cleanup Node %v", nodeName)
}
| test/utils/runners.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.029856141656637192,
0.0009386509191244841,
0.00016513674927409738,
0.00017366331303492188,
0.003919961862266064
] |
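runners.go above leaves PrepareNodeStrategy with a single trivial implementation. A sketch of a custom strategy that labels nodes and cleans the label up afterwards, to show how DoPrepareNode and DoCleanupNode consume the interface; LabelNodePrepareStrategy and its field names are hypothetical, not part of the file above:

// Hypothetical strategy: adds one label to every prepared node.
type LabelNodePrepareStrategy struct {
	LabelKey   string
	LabelValue string
}

// PreparePatch returns a merge patch (DoPrepareNode sends api.MergePatchType)
// that adds the label. Assumes key and value need no JSON escaping.
func (s *LabelNodePrepareStrategy) PreparePatch(*api.Node) []byte {
	return []byte(fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, s.LabelKey, s.LabelValue))
}

// CleanupNode returns a copy of the node without the label; the labels map is
// copied so the cached object is never mutated.
func (s *LabelNodePrepareStrategy) CleanupNode(node *api.Node) *api.Node {
	nodeCopy := *node
	nodeCopy.Labels = make(map[string]string, len(node.Labels))
	for k, v := range node.Labels {
		if k != s.LabelKey {
			nodeCopy.Labels[k] = v
		}
	}
	return &nodeCopy
}

// Usage, assuming a node fetched from the API server:
//   err := DoPrepareNode(client, &node, &LabelNodePrepareStrategy{LabelKey: "perf-test", LabelValue: "true"})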
{
"id": 2,
"code_window": [
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n",
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy) testutils.TestNodePreparer {\n",
"\treturn &E2ETestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1004
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package field
import (
"bytes"
"fmt"
"strconv"
)
// Path represents the path from some root to a particular field.
type Path struct {
name string // the name of this field or "" if this is an index
index string // if name == "", this is a subscript (index or map key) of the previous element
parent *Path // nil if this is the root element
}
// NewPath creates a root Path object.
func NewPath(name string, moreNames ...string) *Path {
r := &Path{name: name, parent: nil}
for _, anotherName := range moreNames {
r = &Path{name: anotherName, parent: r}
}
return r
}
// Root returns the root element of this Path.
func (p *Path) Root() *Path {
for ; p.parent != nil; p = p.parent {
// Do nothing.
}
return p
}
// Child creates a new Path that is a child of the method receiver.
func (p *Path) Child(name string, moreNames ...string) *Path {
r := NewPath(name, moreNames...)
r.Root().parent = p
return r
}
// Index indicates that the previous Path is to be subscripted by an int.
// This sets the same underlying value as Key.
func (p *Path) Index(index int) *Path {
return &Path{index: strconv.Itoa(index), parent: p}
}
// Key indicates that the previous Path is to be subscripted by a string.
// This sets the same underlying value as Index.
func (p *Path) Key(key string) *Path {
return &Path{index: key, parent: p}
}
// String produces a string representation of the Path.
func (p *Path) String() string {
// make a slice to iterate
elems := []*Path{}
for ; p != nil; p = p.parent {
elems = append(elems, p)
}
// iterate, but it has to be backwards
buf := bytes.NewBuffer(nil)
for i := range elems {
p := elems[len(elems)-1-i]
if p.parent != nil && len(p.name) > 0 {
// Not the root and not a subscript: prepend a field separator.
buf.WriteString(".")
}
if len(p.name) > 0 {
buf.WriteString(p.name)
} else {
fmt.Fprintf(buf, "[%s]", p.index)
}
}
return buf.String()
}
| pkg/util/validation/field/path.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00026696943677961826,
0.00018947722855955362,
0.0001673566148383543,
0.00017556516104377806,
0.000028702297640847974
] |
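A short usage sketch for the Path type above, showing how Child, Index, and Key compose into the rendered string (import path as used in-tree):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	// Build spec.containers[0].env[PATH] step by step.
	p := field.NewPath("spec").Child("containers").Index(0).Child("env").Key("PATH")
	fmt.Println(p.String()) // prints: spec.containers[0].env[PATH]
}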
{
"id": 2,
"code_window": [
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n",
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy) testutils.TestNodePreparer {\n",
"\treturn &E2ETestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1004
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| vendor/github.com/docker/spdystream/LICENSE | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0001754877157509327,
0.00016984650574158877,
0.00016699350089766085,
0.000168878206750378,
0.0000024700677840883145
] |
{
"id": 2,
"code_window": [
"\tnodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy\n",
"}\n",
"\n",
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy) testutils.TestNodePreparer {\n",
"\treturn &E2ETestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1004
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package persistentvolumeclaim // import "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim"
| pkg/registry/core/persistentvolumeclaim/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017573803779669106,
0.00017492310144007206,
0.00017410816508345306,
0.00017492310144007206,
8.149363566190004e-7
] |
{
"id": 4,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1026
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
utiluuid "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
)
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
type DensityTestConfig struct {
Configs []testutils.RCConfig
Client *client.Client
ClientSet internalclientset.Interface
Namespace string
PollInterval time.Duration
PodCount int
Timeout time.Duration
}
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
var apiserverMem uint64
var controllerMem uint64
var schedulerMem uint64
apiserverCPU := math.MaxFloat32
apiserverMem = math.MaxUint64
controllerCPU := math.MaxFloat32
controllerMem = math.MaxUint64
schedulerCPU := math.MaxFloat32
schedulerMem = math.MaxUint64
framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider)
if framework.ProviderIs("kubemark") {
if numNodes <= 5 {
apiserverCPU = 0.35
apiserverMem = 150 * (1024 * 1024)
controllerCPU = 0.1
controllerMem = 100 * (1024 * 1024)
schedulerCPU = 0.05
schedulerMem = 50 * (1024 * 1024)
} else if numNodes <= 100 {
apiserverCPU = 1.5
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.75
controllerMem = 750 * (1024 * 1024)
schedulerCPU = 0.75
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 500 {
apiserverCPU = 2.5
apiserverMem = 3400 * (1024 * 1024)
controllerCPU = 1.3
controllerMem = 1100 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 1000 {
apiserverCPU = 4
apiserverMem = 4000 * (1024 * 1024)
controllerCPU = 3
controllerMem = 2000 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 750 * (1024 * 1024)
}
} else {
if numNodes <= 100 {
// TODO: Investigate higher apiserver consumption and
// potentially revert to 1.5cpu and 1.3GB - see #30871
apiserverCPU = 1.8
apiserverMem = 2200 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 300 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 150 * (1024 * 1024)
}
}
constraints := make(map[string]framework.ResourceConstraint)
constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 250 * (1024 * 1024),
}
constraints["elasticsearch-logging"] = framework.ResourceConstraint{
CPUConstraint: 2,
// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
MemoryConstraint: 5000 * (1024 * 1024),
}
constraints["heapster"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 1800 * (1024 * 1024),
}
constraints["kibana-logging"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 100 * (1024 * 1024),
}
constraints["kube-proxy"] = framework.ResourceConstraint{
CPUConstraint: 0.1,
MemoryConstraint: 20 * (1024 * 1024),
}
constraints["l7-lb-controller"] = framework.ResourceConstraint{
CPUConstraint: 0.15,
MemoryConstraint: 60 * (1024 * 1024),
}
constraints["influxdb"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 500 * (1024 * 1024),
}
constraints["kube-apiserver"] = framework.ResourceConstraint{
CPUConstraint: apiserverCPU,
MemoryConstraint: apiserverMem,
}
constraints["kube-controller-manager"] = framework.ResourceConstraint{
CPUConstraint: controllerCPU,
MemoryConstraint: controllerMem,
}
constraints["kube-scheduler"] = framework.ResourceConstraint{
CPUConstraint: schedulerCPU,
MemoryConstraint: schedulerMem,
}
return constraints
}
func logPodStartupStatus(c *client.Client, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := testutils.NewPodStore(c, ns, label, fields.Everything())
defer podStore.Stop()
ticker := time.NewTicker(period)
defer ticker.Stop()
for {
select {
case <-ticker.C:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
case <-stopCh:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
return
}
}
}
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
defer GinkgoRecover()
// Create a listener for events.
// eLock is a lock that protects the events slice
var eLock sync.Mutex
events := make([](*api.Event), 0)
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dtc.Client.Events(dtc.Namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dtc.Client.Events(dtc.Namespace).Watch(options)
},
},
&api.Event{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
eLock.Lock()
defer eLock.Unlock()
events = append(events, obj.(*api.Event))
},
},
)
stop := make(chan struct{})
go controller.Run(stop)
// Create a listener for api updates
// uLock is a lock that protects the updateCount
var uLock sync.Mutex
updateCount := 0
label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
_, updateController := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label
return dtc.Client.Pods(dtc.Namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = label
return dtc.Client.Pods(dtc.Namespace).Watch(options)
},
},
&api.Pod{},
0,
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ interface{}) {
uLock.Lock()
defer uLock.Unlock()
updateCount++
},
},
)
go updateController.Run(stop)
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
rcConfig := dtc.Configs[i]
go func() {
defer GinkgoRecover()
framework.ExpectNoError(framework.RunRC(rcConfig))
wg.Done()
}()
}
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
startupTime := time.Now().Sub(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
By("Waiting for all events to be recorded")
last := -1
current := len(events)
lastCount := -1
currentCount := updateCount
for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < dtc.Timeout; time.Sleep(10 * time.Second) {
func() {
eLock.Lock()
defer eLock.Unlock()
last = current
current = len(events)
}()
func() {
uLock.Lock()
defer uLock.Unlock()
lastCount = currentCount
currentCount = updateCount
}()
}
close(stop)
if current != last {
framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
}
framework.Logf("Found %d events", current)
if currentCount != lastCount {
framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
}
framework.Logf("Found %d updates", currentCount)
// Tune the threshold for allowed failures.
badEvents := framework.BadEvents(events)
Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string)
for _, pod := range podList.Items {
if pod.Namespace == api.NamespaceSystem {
systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
} else {
pausePodAllocation[pod.Spec.NodeName]++
}
}
nodeNames := make([]string, 0)
for k := range pausePodAllocation {
nodeNames = append(nodeNames, k)
}
sort.Strings(nodeNames)
for _, node := range nodeNames {
framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
}
return startupTime
}
func cleanupDensityTest(dtc DensityTestConfig) {
defer GinkgoRecover()
By("Deleting ReplicationController")
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
for i := range dtc.Configs {
rcName := dtc.Configs[i].Name
rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
if err == nil && rc.Spec.Replicas != 0 {
if framework.TestContext.GarbageCollectorEnabled {
By("Cleaning up only the replication controller, garbage collector will clean up the pods")
err := framework.DeleteRCAndWaitForGC(dtc.Client, dtc.Namespace, rcName)
framework.ExpectNoError(err)
} else {
By("Cleaning up the replication controller and pods")
err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName)
framework.ExpectNoError(err)
}
}
}
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() {
var c *client.Client
var nodeCount int
var RCName string
var additionalPodsPrefix string
var ns string
var uuid string
var e2eStartupTime time.Duration
var totalPods int
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *api.NodeList
var masters sets.String
// Gathers data prior to framework namespace teardown
AfterEach(func() {
saturationThreshold := time.Duration((totalPods / MinPodsPerSecondThroughput)) * time.Second
if saturationThreshold < MinSaturationThreshold {
saturationThreshold = MinSaturationThreshold
}
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
saturationData := framework.SaturationTime{
TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount,
NumberOfPods: totalPods,
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
}
framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
// Verify latency metrics.
highLatencyRequests, err := framework.HighLatencyRequests(c)
framework.ExpectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
// Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver.
framework.ExpectNoError(framework.VerifySchedulerLatency(c))
})
// Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.).
f := framework.NewDefaultFramework("density")
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
c = f.Client
ns = f.Namespace.Name
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespaces to be fully deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
uuid = string(utiluuid.NewUUID())
framework.ExpectNoError(framework.ResetMetrics(c))
framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
framework.Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items {
var internalIP, externalIP string
for _, address := range node.Status.Addresses {
if address.Type == api.NodeInternalIP {
internalIP = address.Address
}
if address.Type == api.NodeExternalIP {
externalIP = address.Address
}
}
framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
}
})
type Density struct {
// Controls if e2e latency tests should be run (they are slow)
runLatencyTest bool
podsPerNode int
// Controls how often the apiserver is polled for pods
interval time.Duration
}
densityTests := []Density{
// TODO: Expose runLatencyTest as ginkgo flag.
{podsPerNode: 3, runLatencyTest: false, interval: 10 * time.Second},
{podsPerNode: 30, runLatencyTest: true, interval: 10 * time.Second},
{podsPerNode: 50, runLatencyTest: false, interval: 10 * time.Second},
{podsPerNode: 95, runLatencyTest: true, interval: 10 * time.Second},
{podsPerNode: 100, runLatencyTest: false, interval: 10 * time.Second},
}
for _, testArg := range densityTests {
feature := "ManualPerformance"
switch testArg.podsPerNode {
case 30:
feature = "Performance"
case 95:
feature = "HighDensityPerformance"
}
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node", feature, testArg.podsPerNode)
itArg := testArg
It(name, func() {
nodePreparer := framework.NewE2ETestNodePreparer(
f.ClientSet,
map[int]testutils.PrepareNodeStrategy{nodeCount: &testutils.TrivialNodePrepareStrategy{}},
)
framework.ExpectNoError(nodePreparer.PrepareNodes())
defer nodePreparer.CleanupNodes()
podsPerNode := itArg.podsPerNode
if podsPerNode == 30 {
				f.AddonResourceConstraints = density30AddonResourceVerifier(nodeCount)
}
totalPods = podsPerNode * nodeCount
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
timeout := 10 * time.Minute
// TODO: loop to podsPerNode instead of 1 when we're ready.
			numberOfRCs := 1
			RCConfigs := make([]testutils.RCConfig, numberOfRCs)
			for i := 0; i < numberOfRCs; i++ {
				RCName := "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
				RCConfigs[i] = testutils.RCConfig{Client: c,
					Image:         framework.GetPauseImageName(f.Client),
					Name:          RCName,
					Namespace:     ns,
					Labels:        map[string]string{"type": "densityPod"},
					PollInterval:  itArg.interval,
					PodStatusFile: fileHndl,
					Replicas:      (totalPods + numberOfRCs - 1) / numberOfRCs,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
}
}
dConfig := DensityTestConfig{
Client: c,
ClientSet: f.ClientSet,
Configs: RCConfigs,
PodCount: totalPods,
Namespace: ns,
PollInterval: itArg.interval,
Timeout: timeout,
}
e2eStartupTime = runDensityTest(dConfig)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
				createTimes := make(map[string]unversioned.Time)
				nodes := make(map[string]string)
				scheduleTimes := make(map[string]unversioned.Time)
				runTimes := make(map[string]unversioned.Time)
				watchTimes := make(map[string]unversioned.Time)
var mutex sync.Mutex
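				// checkPod records, under the mutex, the first time a pod is observed
				// Running: its watch time, creation time, assigned node, and the
				// latest container start time reported in its status.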
checkPod := func(p *api.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == api.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = unversioned.Now()
createTimes[p.Name] = p.CreationTimestamp
nodes[p.Name] = p.Spec.NodeName
var startTime unversioned.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTime.Before(cs.State.Running.StartedAt) {
startTime = cs.State.Running.StartedAt
}
}
}
							if !startTime.IsZero() {
runTimes[p.Name] = startTime
} else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
}
}
}
}
additionalPodsPrefix = "density-latency-pod"
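				// Watch the latency pods via an informer so that each pod's
				// watch-observation time is recorded as soon as the apiserver
				// reports it Running.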
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).Watch(options)
},
},
&api.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*api.Pod)
Expect(ok).To(Equal(true))
go checkPod(p)
},
},
)
stopCh := make(chan struct{})
go controller.Run(stopCh)
// Create some additional pods with throughput ~5 pods/sec.
var wg sync.WaitGroup
wg.Add(nodeCount)
				// Explicitly set requests here, so that scheduling a pod onto a
				// node changes that node's priority; this in turn spreads the
				// latency pods more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
By("Waiting for all Pods begin observed by the watch...")
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < timeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for _, item := range latencyPodsStore.List() {
pod := item.(*api.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
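				// Recover each pod's schedule time from the scheduler's events,
				// since the watch above only observes pods once they are Running.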
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": ns,
"source": api.DefaultSchedulerName,
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimes[k] = event.FirstTimestamp
break
}
}
}
scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]framework.PodLatencyData, 0)
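				// Break each pod's end-to-end startup into phases:
				// create -> scheduled (scheduleLag), scheduled -> running (startupLag),
				// running -> observed (watchLag), scheduled -> observed (schedToWatchLag),
				// and create -> observed (e2eLag).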
for name, create := range createTimes {
sched, ok := scheduleTimes[name]
Expect(ok).To(Equal(true))
run, ok := runTimes[name]
Expect(ok).To(Equal(true))
watch, ok := watchTimes[name]
Expect(ok).To(Equal(true))
node, ok := nodes[name]
Expect(ok).To(Equal(true))
scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
}
sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
By("Removing additional replication controllers")
deleteRC := func(i int) {
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, ns, name))
}
workqueue.Parallelize(16, nodeCount, deleteRC)
}
cleanupDensityTest(dConfig)
})
}
	// Calculate the total number of pods from each node's max-pods capacity.
It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
totalPods = 0
for _, n := range nodes.Items {
totalPods += int(n.Status.Capacity.Pods().Value())
}
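		// Subtract the pods that are already scheduled in the cluster (the count
		// returned by WaitForStableCluster) so the RCs below only fill the
		// remaining capacity.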
totalPods -= framework.WaitForStableCluster(c, masters)
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
rcCnt := 1
RCConfigs := make([]testutils.RCConfig, rcCnt)
podsPerRC := int(totalPods / rcCnt)
for i := 0; i < rcCnt; i++ {
if i == rcCnt-1 {
podsPerRC += int(math.Mod(float64(totalPods), float64(rcCnt)))
}
RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = testutils.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.Client),
Name: RCName,
Namespace: ns,
Labels: map[string]string{"type": "densityPod"},
PollInterval: 10 * time.Second,
PodStatusFile: fileHndl,
Replicas: podsPerRC,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
}
}
dConfig := DensityTestConfig{
Client: c,
ClientSet: f.ClientSet,
Configs: RCConfigs,
PodCount: totalPods,
Namespace: ns,
PollInterval: 10 * time.Second,
Timeout: 10 * time.Minute,
}
e2eStartupTime = runDensityTest(dConfig)
cleanupDensityTest(dConfig)
})
})
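// createRunningPodFromRC creates a single-replica RC with the given resource
// requests and blocks until its pod is observed Running.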
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,
"name": name,
}
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: labels,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: name,
Image: image,
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: cpuRequest,
api.ResourceMemory: memRequest,
},
},
},
},
DNSPolicy: api.DNSDefault,
},
},
},
}
_, err := c.ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
framework.Logf("Found pod '%s' running", name)
}
| test/e2e/density.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0014432163443416357,
0.00019184422853868455,
0.00016035028966143727,
0.00017336801101919264,
0.00014366398681886494
] |
{
"id": 4,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1026
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpctypes
import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
// server-side error
ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided")
ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request")
ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found")
ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists")
ErrGRPCMemberExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: member ID already exist")
ErrGRPCPeerURLExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
ErrGRPCMemberBadURLs = grpc.Errorf(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
ErrGRPCMemberNotFound = grpc.Errorf(codes.NotFound, "etcdserver: member not found")
ErrGRPCRequestTooLarge = grpc.Errorf(codes.InvalidArgument, "etcdserver: request is too large")
ErrGRPCRootUserNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not exist")
ErrGRPCRootRoleNotExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: root user does not have root role")
ErrGRPCUserAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name already exists")
ErrGRPCUserNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: user name not found")
ErrGRPCRoleAlreadyExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name already exists")
ErrGRPCRoleNotFound = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role name not found")
ErrGRPCAuthFailed = grpc.Errorf(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
ErrGRPCPermissionDenied = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission denied")
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
ErrGRPCStopped = grpc.Errorf(codes.Unavailable, "etcdserver: server stopped")
errStringToError = map[string]error{
grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
grpc.ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
grpc.ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
grpc.ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
grpc.ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
grpc.ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
grpc.ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
grpc.ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
grpc.ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
grpc.ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
grpc.ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
grpc.ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
grpc.ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
grpc.ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
grpc.ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
grpc.ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
grpc.ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
grpc.ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
grpc.ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
}
// client-side error
ErrEmptyKey = Error(ErrGRPCEmptyKey)
ErrTooManyOps = Error(ErrGRPCTooManyOps)
ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
ErrCompacted = Error(ErrGRPCCompacted)
ErrFutureRev = Error(ErrGRPCFutureRev)
ErrNoSpace = Error(ErrGRPCNoSpace)
ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
ErrLeaseExist = Error(ErrGRPCLeaseExist)
ErrMemberExist = Error(ErrGRPCMemberExist)
ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
ErrUserNotFound = Error(ErrGRPCUserNotFound)
ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
ErrAuthFailed = Error(ErrGRPCAuthFailed)
ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
ErrNoLeader = Error(ErrGRPCNoLeader)
ErrNotCapable = Error(ErrGRPCNotCapable)
ErrStopped = Error(ErrGRPCStopped)
)
// EtcdError defines gRPC server errors.
// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
type EtcdError struct {
code codes.Code
desc string
}
// Code returns grpc/codes.Code.
// TODO: define clientv3/codes.Code.
func (e EtcdError) Code() codes.Code {
return e.code
}
func (e EtcdError) Error() string {
return e.desc
}
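// Error converts a gRPC server error into its client-side equivalent,
// passing through any error that is not a known etcd error.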
func Error(err error) error {
if err == nil {
return nil
}
verr, ok := errStringToError[grpc.ErrorDesc(err)]
if !ok { // not gRPC error
return err
}
return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)}
}
| vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00022920862829778343,
0.00018516193085815758,
0.00016644710558466613,
0.00017723401833791286,
0.00002063828105747234
] |
{
"id": 4,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1026
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package portforward
import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"sync"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/kubelet/server/portforward"
"k8s.io/client-go/pkg/util/httpstream"
"k8s.io/client-go/pkg/util/runtime"
)
// PortForwarder knows how to listen for local connections and forward them to
// a remote pod via an upgraded HTTP request.
type PortForwarder struct {
ports []ForwardedPort
stopChan <-chan struct{}
dialer httpstream.Dialer
streamConn httpstream.Connection
listeners []io.Closer
Ready chan struct{}
requestIDLock sync.Mutex
requestID int
out io.Writer
errOut io.Writer
}
// ForwardedPort contains a Local:Remote port pairing.
type ForwardedPort struct {
Local uint16
Remote uint16
}
/*
valid port specifications:
5000
- forwards from localhost:5000 to pod:5000
8888:5000
- forwards from localhost:8888 to pod:5000
0:5000
:5000
- selects a random available local port,
forwards from localhost:<random port> to pod:5000
*/
func parsePorts(ports []string) ([]ForwardedPort, error) {
var forwards []ForwardedPort
for _, portString := range ports {
parts := strings.Split(portString, ":")
var localString, remoteString string
if len(parts) == 1 {
localString = parts[0]
remoteString = parts[0]
} else if len(parts) == 2 {
localString = parts[0]
if localString == "" {
// support :5000
localString = "0"
}
remoteString = parts[1]
} else {
return nil, fmt.Errorf("Invalid port format '%s'", portString)
}
localPort, err := strconv.ParseUint(localString, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err)
}
remotePort, err := strconv.ParseUint(remoteString, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err)
}
if remotePort == 0 {
return nil, fmt.Errorf("Remote port must be > 0")
}
forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
}
return forwards, nil
}
// New creates a new PortForwarder.
func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
if len(ports) == 0 {
return nil, errors.New("You must specify at least 1 port")
}
parsedPorts, err := parsePorts(ports)
if err != nil {
return nil, err
}
return &PortForwarder{
dialer: dialer,
ports: parsedPorts,
stopChan: stopChan,
Ready: readyChan,
out: out,
errOut: errOut,
}, nil
}
// ForwardPorts formats and executes a port forwarding request. The connection will remain
// open until stopChan is closed.
func (pf *PortForwarder) ForwardPorts() error {
defer pf.Close()
var err error
pf.streamConn, _, err = pf.dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return fmt.Errorf("error upgrading connection: %s", err)
}
defer pf.streamConn.Close()
return pf.forward()
}
// forward dials the remote host specific in req, upgrades the request, starts
// listeners for each port specified in ports, and forwards local connections
// to the remote host via streams.
func (pf *PortForwarder) forward() error {
var err error
listenSuccess := false
for _, port := range pf.ports {
err = pf.listenOnPort(&port)
switch {
case err == nil:
listenSuccess = true
default:
if pf.errOut != nil {
fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
}
}
}
if !listenSuccess {
return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports)
}
if pf.Ready != nil {
close(pf.Ready)
}
// wait for interrupt or conn closure
select {
case <-pf.stopChan:
case <-pf.streamConn.CloseChan():
runtime.HandleError(errors.New("lost connection to pod"))
}
return nil
}
// listenOnPort delegates tcp4 and tcp6 listener creation and waits for connections on both of these addresses.
// If both listener creations fail, an error is returned.
func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
errTcp4 := pf.listenOnPortAndAddress(port, "tcp4", "127.0.0.1")
errTcp6 := pf.listenOnPortAndAddress(port, "tcp6", "[::1]")
if errTcp4 != nil && errTcp6 != nil {
return fmt.Errorf("All listeners failed to create with the following errors: %s, %s", errTcp4, errTcp6)
}
return nil
}
// listenOnPortAndAddress delegates listener creation and waits for new connections
// in the background.
func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
listener, err := pf.getListener(protocol, address, port)
if err != nil {
return err
}
pf.listeners = append(pf.listeners, listener)
go pf.waitForConnection(listener, *port)
return nil
}
// getListener creates a listener on the interface targeted by the given hostname on the given port with
// the given protocol. protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6
func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
listener, err := net.Listen(protocol, fmt.Sprintf("%s:%d", hostname, port.Local))
if err != nil {
runtime.HandleError(fmt.Errorf("Unable to create listener: Error %s", err))
return nil, err
}
listenerAddress := listener.Addr().String()
host, localPort, _ := net.SplitHostPort(listenerAddress)
localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
if err != nil {
return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host)
}
port.Local = uint16(localPortUInt)
if pf.out != nil {
fmt.Fprintf(pf.out, "Forwarding from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
}
return listener, nil
}
// waitForConnection waits for new connections to listener and handles them in
// the background.
func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
for {
conn, err := listener.Accept()
if err != nil {
// TODO consider using something like https://github.com/hydrogen18/stoppableListener?
if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("Error accepting connection on port %d: %v", port.Local, err))
}
return
}
go pf.handleConnection(conn, port)
}
}
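// nextRequestID returns a monotonically increasing request id, used to pair
// the error and data streams of a single forwarded connection.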
func (pf *PortForwarder) nextRequestID() int {
pf.requestIDLock.Lock()
defer pf.requestIDLock.Unlock()
id := pf.requestID
pf.requestID++
return id
}
// handleConnection copies data between the local connection and the stream to
// the remote server.
func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
defer conn.Close()
if pf.out != nil {
fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
}
requestID := pf.nextRequestID()
// create error stream
headers := http.Header{}
headers.Set(api.StreamType, api.StreamTypeError)
headers.Set(api.PortHeader, fmt.Sprintf("%d", port.Remote))
headers.Set(api.PortForwardRequestIDHeader, strconv.Itoa(requestID))
errorStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
// we're not writing to this stream
errorStream.Close()
errorChan := make(chan error)
go func() {
message, err := ioutil.ReadAll(errorStream)
switch {
case err != nil:
errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
case len(message) > 0:
errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
}
close(errorChan)
}()
// create data stream
headers.Set(api.StreamType, api.StreamTypeData)
dataStream, err := pf.streamConn.CreateStream(headers)
if err != nil {
runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
return
}
localError := make(chan struct{})
remoteDone := make(chan struct{})
go func() {
// Copy from the remote side to the local port.
if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
}
// inform the select below that the remote copy is done
close(remoteDone)
}()
go func() {
// inform server we're not sending any more data after copy unblocks
defer dataStream.Close()
// Copy from the local port to the remote side.
if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
// break out of the select below without waiting for the other copy to finish
close(localError)
}
}()
// wait for either a local->remote error or for copying from remote->local to finish
select {
case <-remoteDone:
case <-localError:
}
// always expect something on errorChan (it may be nil)
err = <-errorChan
if err != nil {
runtime.HandleError(err)
}
}
func (pf *PortForwarder) Close() {
// stop all listeners
for _, l := range pf.listeners {
if err := l.Close(); err != nil {
runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
}
}
}
| staging/src/k8s.io/client-go/tools/portforward/portforward.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.001609851373359561,
0.00021060252038296312,
0.00016394047997891903,
0.00016869569662958384,
0.00024000897246878594
] |
{
"id": 4,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1026
} |
# End-to-End Testing in Kubernetes
Updated: 5/3/2016
**Table of Contents**
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [End-to-End Testing in Kubernetes](#end-to-end-testing-in-kubernetes)
- [Overview](#overview)
- [Building and Running the Tests](#building-and-running-the-tests)
- [Cleaning up](#cleaning-up)
- [Advanced testing](#advanced-testing)
- [Bringing up a cluster for testing](#bringing-up-a-cluster-for-testing)
- [Federation e2e tests](#federation-e2e-tests)
- [Configuring federation e2e tests](#configuring-federation-e2e-tests)
- [Image Push Repository](#image-push-repository)
- [Build](#build)
- [Deploy federation control plane](#deploy-federation-control-plane)
- [Run the Tests](#run-the-tests)
- [Teardown](#teardown)
- [Shortcuts for test developers](#shortcuts-for-test-developers)
- [Debugging clusters](#debugging-clusters)
- [Local clusters](#local-clusters)
- [Testing against local clusters](#testing-against-local-clusters)
- [Version-skewed and upgrade testing](#version-skewed-and-upgrade-testing)
- [Kinds of tests](#kinds-of-tests)
  - [Viper configuration and hierarchical test parameters.](#viper-configuration-and-hierarchical-test-parameters)
- [Conformance tests](#conformance-tests)
- [Defining Conformance Subset](#defining-conformance-subset)
- [Continuous Integration](#continuous-integration)
- [What is CI?](#what-is-ci)
- [What runs in CI?](#what-runs-in-ci)
- [Non-default tests](#non-default-tests)
- [The PR-builder](#the-pr-builder)
- [Adding a test to CI](#adding-a-test-to-ci)
- [Moving a test out of CI](#moving-a-test-out-of-ci)
- [Performance Evaluation](#performance-evaluation)
- [One More Thing](#one-more-thing)
<!-- END MUNGE: GENERATED_TOC -->
## Overview
End-to-end (e2e) tests for Kubernetes provide a mechanism to test end-to-end
behavior of the system, and are the last signal to ensure end-user operations
match developer specifications. Although unit and integration tests provide a
good signal, in a distributed system like Kubernetes it is not uncommon that a
minor change may pass all unit and integration tests, but cause unforeseen
changes at the system level.
The primary objectives of the e2e tests are to ensure a consistent and reliable
behavior of the kubernetes code base, and to catch hard-to-test bugs before
users do, when unit and integration tests are insufficient.
The e2e tests in kubernetes are built atop
[Ginkgo](http://onsi.github.io/ginkgo/) and
[Gomega](http://onsi.github.io/gomega/). There are a host of features that this
Behavior-Driven Development (BDD) testing framework provides, and it is
recommended that the developer read the documentation prior to diving into the
tests.
The purpose of *this* document is to serve as a primer for developers who are
looking to execute or add tests using a local development environment.
Before writing new tests or making substantive changes to existing tests, you
should also read [Writing Good e2e Tests](writing-good-e2e-tests.md)
## Building and Running the Tests
There are a variety of ways to run e2e tests, but we aim to converge on a
single canonical way: `hack/e2e.go`.
You can run an end-to-end test which will bring up a master and nodes, perform
some tests, and then tear everything down. Make sure you have followed the
getting started steps for your chosen cloud platform (which might involve
changing the `KUBERNETES_PROVIDER` environment variable to something other than
"gce").
To build Kubernetes, up a cluster, run tests, and tear everything down, use:
```sh
go run hack/e2e.go -v --build --up --test --down
```
If you'd like to just perform one of these steps, here are some examples:
```sh
# Build binaries for testing
go run hack/e2e.go -v --build
# Create a fresh cluster. Deletes a cluster first, if it exists
go run hack/e2e.go -v --up
# Run all tests
go run hack/e2e.go -v --test
# Run tests matching the regex "\[Feature:Performance\]"
go run hack/e2e.go -v --test --test_args="--ginkgo.focus=\[Feature:Performance\]"
# Conversely, exclude tests that match the regex "Pods.*env"
go run hack/e2e.go -v --test --test_args="--ginkgo.skip=Pods.*env"
# Run tests in parallel, skip any that must be run serially
GINKGO_PARALLEL=y go run hack/e2e.go --v --test --test_args="--ginkgo.skip=\[Serial\]"
# Run tests in parallel, skip any that must be run serially, and keep the test namespace if a test fails
GINKGO_PARALLEL=y go run hack/e2e.go --v --test --test_args="--ginkgo.skip=\[Serial\] --delete-namespace-on-failure=false"
# Flags can be combined, and their actions will take place in this order:
# --build, --up, --test, --down
#
# You can also specify an alternative provider, such as 'aws'
#
# e.g.:
KUBERNETES_PROVIDER=aws go run hack/e2e.go -v --build --up --test --down
# -ctl can be used to quickly call kubectl against your e2e cluster. Useful for
# cleaning up after a failed test or viewing logs. Use -v to avoid suppressing
# kubectl output.
go run hack/e2e.go -v -ctl='get events'
go run hack/e2e.go -v -ctl='delete pod foobar'
```
The tests are built into a single binary, which can be used to deploy a
Kubernetes system or to run tests against an already-deployed Kubernetes system.
See `go run hack/e2e.go --help` (or the flag definitions in `hack/e2e.go`) for
more options, such as reusing an existing cluster.
### Cleaning up
During a run, pressing `control-C` should result in an orderly shutdown, but if
something goes wrong and you still have some VMs running you can force a cleanup
with this command:
```sh
go run hack/e2e.go -v --down
```
## Advanced testing
### Bringing up a cluster for testing
If you want, you may bring up a cluster in some other manner and run tests
against it. To do so, or to do other non-standard test things, you can pass
arguments into Ginkgo using `--test_args` (e.g. see above). For the purposes of
brevity, we will look at a subset of the options, which are listed below:
```
--ginkgo.dryRun=false: If set, ginkgo will walk the test hierarchy without
actually running anything. Best paired with -v.
--ginkgo.failFast=false: If set, ginkgo will stop running a test suite after a
failure occurs.
--ginkgo.failOnPending=false: If set, ginkgo will mark the test suite as failed
if any specs are pending.
--ginkgo.focus="": If set, ginkgo will only run specs that match this regular
expression.
--ginkgo.skip="": If set, ginkgo will only run specs that do not match this
regular expression.
--ginkgo.trace=false: If set, default reporter prints out the full stack trace
when a failure occurs
--ginkgo.v=false: If set, default reporter print out all specs as they begin.
--host="": The host, or api-server, to connect to
--kubeconfig="": Path to kubeconfig containing embedded authinfo.
--prom-push-gateway="": The URL to prometheus gateway, so that metrics can be
pushed during e2es and scraped by prometheus. Typically something like
127.0.0.1:9091.
--provider="": The name of the Kubernetes provider (gce, gke, local, vagrant,
etc.)
--repo-root="../../": Root directory of kubernetes repository, for finding test
files.
```
Prior to running the tests, you may want to first create a simple auth file in
your home directory, e.g. `$HOME/.kube/config`, with the following:
```
{
"User": "root",
"Password": ""
}
```
As mentioned earlier there are a host of other options that are available, but
they are left to the developer.
**NOTE:** If you are running tests on a local cluster repeatedly, you may need
to periodically perform some manual cleanup:
- `rm -rf /var/run/kubernetes`, clear kube generated credentials, sometimes
stale permissions can cause problems.
- `sudo iptables -F`, clear ip tables rules left by the kube-proxy.
### Federation e2e tests
By default, `e2e.go` provisions a single Kubernetes cluster, and any `Feature:Federation` ginkgo tests will be skipped.
Federation e2e testing involve bringing up multiple "underlying" Kubernetes clusters,
and deploying the federation control plane as a Kubernetes application on the underlying clusters.
The federation e2e tests are still managed via `e2e.go`, but require some extra configuration items.
#### Configuring federation e2e tests
The following environment variables will enable federation e2e building, provisioning and testing.
```sh
$ export FEDERATION=true
$ export E2E_ZONES="us-central1-a us-central1-b us-central1-f"
```
A Kubernetes cluster will be provisioned in each zone listed in `E2E_ZONES`. A zone can only appear once in the `E2E_ZONES` list.
#### Image Push Repository
Next, specify the docker repository where your ci images will be pushed.
* **If `KUBERNETES_PROVIDER=gce` or `KUBERNETES_PROVIDER=gke`**:
If you use the same GCP project in which you run the e2e tests as the container image repository,
the FEDERATION_PUSH_REPO_BASE environment variable defaults to "gcr.io/${DEFAULT_GCP_PROJECT_NAME}".
Simply set your push repo base from your project name, and the necessary repositories will be
auto-created when you first push your container images.
```sh
$ export FEDERATION_PUSH_REPO_BASE="gcr.io/${GCE_PROJECT_NAME}"
```
Skip ahead to the **Build** section.
* **For all other providers**:
You'll be responsible for creating and managing access to the repositories manually.
```sh
$ export FEDERATION_PUSH_REPO_BASE="quay.io/colin_hom"
```
Given this example, the `federation-apiserver` container image will be pushed to the repository
`quay.io/colin_hom/federation-apiserver`.
The docker client on the machine running `e2e.go` must have push access for the following pre-existing repositories:
* `${FEDERATION_PUSH_REPO_BASE}/federation-apiserver`
* `${FEDERATION_PUSH_REPO_BASE}/federation-controller-manager`
These repositories must allow public read access, as the e2e node docker daemons will not have any credentials. If you're using
GCE/GKE as your provider, the repositories will have read-access by default.
#### Build
* Compile the binaries and build container images:
```sh
$ KUBE_RELEASE_RUN_TESTS=n KUBE_FASTBUILD=true go run hack/e2e.go -v -build
```
* Push the federation container images
```sh
$ build/push-federation-images.sh
```
#### Deploy federation control plane
The following command will create the underlying Kubernetes clusters in each of `E2E_ZONES`, and then provision the
federation control plane in the cluster occupying the last zone in the `E2E_ZONES` list.
```sh
$ go run hack/e2e.go -v --up
```
#### Run the Tests
This will run only the `Feature:Federation` e2e tests. You can omit the `ginkgo.focus` argument to run the entire e2e suite.
```sh
$ go run hack/e2e.go -v --test --test_args="--ginkgo.focus=\[Feature:Federation\]"
```
#### Teardown
```sh
$ go run hack/e2e.go -v --down
```
#### Shortcuts for test developers
* To speed up `e2e.go -up`, provision a single-node kubernetes cluster in a single e2e zone:
`NUM_NODES=1 E2E_ZONES="us-central1-f"`
Keep in mind that some tests may require multiple underlying clusters and/or minimum compute resource availability.
* You can quickly recompile the e2e testing framework via `go install ./test/e2e`. This will not do anything besides
allow you to verify that the go code compiles.
* If you want to run your e2e testing framework without re-provisioning the e2e setup, you can do so via
`make WHAT=test/e2e/e2e.test` and then re-running the ginkgo tests.
* If you're hacking around with the federation control plane deployment itself,
you can quickly re-deploy the federation control plane Kubernetes manifests without tearing any resources down.
To re-deploy the federation control plane after running `-up` for the first time:
```sh
$ federation/cluster/federation-up.sh
```
### Debugging clusters
If a cluster fails to initialize, or you'd like to better understand cluster
state to debug a failed e2e test, you can use the `cluster/log-dump.sh` script
to gather logs.
This script requires that the cluster provider supports ssh. Assuming it does,
running:
```
cluster/log-dump.sh <directory>
```
will ssh to the master and all nodes and download a variety of useful logs to
the provided directory (which should already exist).
The Google-run Jenkins builds automatically collect these logs for every
build, saving them in the `artifacts` directory uploaded to GCS.
### Local clusters
It can be much faster to iterate on a local cluster instead of a cloud-based
one. To start a local cluster, you can run:
```sh
# The PATH construction is needed because PATH is one of the special-cased
# environment variables not passed by sudo -E
sudo PATH=$PATH hack/local-up-cluster.sh
```
This will start a single-node Kubernetes cluster that runs pods using the local
docker daemon. Press Control-C to stop the cluster.
You can generate a valid kubeconfig file by following instructions printed at the
end of the aforementioned script.
#### Testing against local clusters
In order to run an E2E test against a locally running cluster, point the tests
at a custom host directly:
```sh
export KUBECONFIG=/path/to/kubeconfig
export KUBE_MASTER_IP="http://127.0.0.1:<PORT>"
export KUBE_MASTER=local
go run hack/e2e.go -v --test
```
To control the tests that are run:
```sh
go run hack/e2e.go -v --test --test_args="--ginkgo.focus=\"Secrets\""
```
### Version-skewed and upgrade testing
We run version-skewed tests to check that newer versions of Kubernetes work
similarly enough to older versions. The general strategy is to cover the following cases:
1. One version of `kubectl` with another version of the cluster and tests (e.g.
that v1.2 and v1.4 `kubectl` doesn't break v1.3 tests running against a v1.3
cluster).
1. A newer version of the Kubernetes master with older nodes and tests (e.g.
that upgrading a master to v1.3 with nodes at v1.2 still passes v1.2 tests).
1. A newer version of the whole cluster with older tests (e.g. that a cluster
upgraded---master and nodes---to v1.3 still passes v1.2 tests).
1. That an upgraded cluster functions the same as a brand-new cluster of the
same version (e.g. a cluster upgraded to v1.3 passes the same v1.3 tests as
a newly-created v1.3 cluster).
[hack/jenkins/e2e-runner.sh](http://releases.k8s.io/HEAD/hack/jenkins/e2e-runner.sh) is
the authoritative source on how to run version-skewed tests, but below is a
quick-and-dirty tutorial.
```sh
# Assume you have two copies of the Kubernetes repository checked out, at
# ./kubernetes and ./kubernetes_old
# If using GKE:
export KUBERNETES_PROVIDER=gke
export CLUSTER_API_VERSION=${OLD_VERSION}
# Deploy a cluster at the old version; see above for more details
cd ./kubernetes_old
go run ./hack/e2e.go -v --up
# Upgrade the cluster to the new version
#
# If using GKE, add --upgrade-target=${NEW_VERSION}
#
# You can target Feature:MasterUpgrade or Feature:ClusterUpgrade
cd ../kubernetes
go run ./hack/e2e.go -v --test --check_version_skew=false --test_args="--ginkgo.focus=\[Feature:MasterUpgrade\]"
# Run old tests with new kubectl
cd ../kubernetes_old
go run ./hack/e2e.go -v --test --test_args="--kubectl-path=$(pwd)/../kubernetes/cluster/kubectl.sh"
```
If you are just testing version-skew, you may want to just deploy at one
version and then test at another version, instead of going through the whole
upgrade process:
```sh
# With the same setup as above
# Deploy a cluster at the new version
cd ./kubernetes
go run ./hack/e2e.go -v --up
# Run new tests with old kubectl
go run ./hack/e2e.go -v --test --test_args="--kubectl-path=$(pwd)/../kubernetes_old/cluster/kubectl.sh"
# Run old tests with new kubectl
cd ../kubernetes_old
go run ./hack/e2e.go -v --test --test_args="--kubectl-path=$(pwd)/../kubernetes/cluster/kubectl.sh"
```
## Kinds of tests
We are working on implementing clearer partitioning of our e2e tests to make
running a known set of tests easier (#10548). Tests can be labeled with any of
the following labels, in order of increasing precedence (that is, each label
listed below supersedes the previous ones); a short illustrative snippet follows the list:
- If a test has no labels, it is expected to run fast (under five minutes), be
able to be run in parallel, and be consistent.
- `[Slow]`: If a test takes more than five minutes to run (by itself or in
parallel with many other tests), it is labeled `[Slow]`. This partition allows
us to run almost all of our tests quickly in parallel, without waiting for the
stragglers to finish.
- `[Serial]`: If a test cannot be run in parallel with other tests (e.g. it
takes too many resources or restarts nodes), it is labeled `[Serial]`, and
should be run in serial as part of a separate suite.
- `[Disruptive]`: If a test restarts components that might cause other tests
to fail or break the cluster completely, it is labeled `[Disruptive]`. Any
`[Disruptive]` test is also assumed to qualify for the `[Serial]` label, but
need not be labeled as both. These tests are not run against soak clusters to
avoid restarting components.
- `[Flaky]`: If a test is found to be flaky and we have decided that it's too
hard to fix in the short term (e.g. it's going to take a full engineer-week), it
receives the `[Flaky]` label until it is fixed. The `[Flaky]` label should be
used very sparingly, and should be accompanied with a reference to the issue for
de-flaking the test, because while a test remains labeled `[Flaky]`, it is not
monitored closely in CI. `[Flaky]` tests are by default not run, unless a
`focus` or `skip` argument is explicitly given.
- `[Feature:.+]`: If a test has non-default requirements to run or targets
some non-core functionality, and thus should not be run as part of the standard
suite, it receives a `[Feature:.+]` label, e.g. `[Feature:Performance]` or
`[Feature:Ingress]`. `[Feature:.+]` tests are not run in our core suites,
instead running in custom suites. If a feature is experimental or alpha and is
not enabled by default due to being incomplete or potentially subject to
breaking changes, it does *not* block the merge-queue, and thus should run in
some separate test suites owned by the feature owner(s)
(see [Continuous Integration](#continuous-integration) below).
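For illustration, these labels are plain substrings of the ginkgo spec name, so
a hypothetical labeled test might look like the sketch below; `--ginkgo.focus`
and `--ginkgo.skip` simply match them as regular expressions:

```go
package example

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical specs showing where the labels live: in the spec string itself.
var _ = framework.KubeDescribe("Example", func() {
	It("should survive a component restart [Serial] [Disruptive]", func() {
		// ...
	})

	It("should exercise ingress [Feature:Ingress]", func() {
		// ...
	})
})
```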
### Viper configuration and hierarchical test parameters.
E2e test configuration will increasingly be defined via viper and decreasingly
via flags: flags in general fall apart once tests become sufficiently
complicated, so even switching to another flag library would not help much.
To use viper, rather than flags, to configure your tests:
- Just add "e2e.json" to the current directory you are in, and define parameters in it... i.e. `"kubeconfig":"/tmp/x"`.
Note that advanced testing parameters and hierarchically defined parameters are only defined in viper; to see what they are, you can dive into [TestContextType](../../test/e2e/framework/test_context.go).
In time, it is our intent to add or autogenerate a sample viper configuration that includes all e2e parameters, to ship with kubernetes.
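Until then, a minimal `e2e.json` might look like the following sketch (the keys
shown are assumptions based on the flag names above, not an authoritative
schema):

```json
{
    "kubeconfig": "/tmp/x",
    "provider": "gce",
    "repo-root": "../../"
}
```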
### Conformance tests
Finally, `[Conformance]` tests represent a subset of the e2e-tests we expect to
pass on **any** Kubernetes cluster. The `[Conformance]` label does not supersede
any other labels.
As each new release of Kubernetes provides new functionality, the subset of
tests necessary to demonstrate conformance grows with each release. Conformance
is thus considered versioned, with the same backwards compatibility guarantees
as laid out in [our versioning policy](../design/versioning.md#supported-releases).
Conformance tests for a given version should be run from the release branch
that corresponds to that version. Thus `v1.2` conformance tests would be run
from the head of the `release-1.2` branch. For example:
- A v1.3 development cluster should pass v1.1, v1.2 conformance tests
- A v1.2 cluster should pass v1.1, v1.2 conformance tests
- A v1.1 cluster should pass v1.0, v1.1 conformance tests, and fail v1.2
conformance tests
Conformance tests are designed to be run with no cloud provider configured.
Conformance tests can be run against clusters that have not been created with
`hack/e2e.go`, just provide a kubeconfig with the appropriate endpoint and
credentials.
```sh
# setup for conformance tests
export KUBECONFIG=/path/to/kubeconfig
export KUBERNETES_CONFORMANCE_TEST=y
export KUBERNETES_PROVIDER=skeleton
# run all conformance tests
go run hack/e2e.go -v --test --test_args="--ginkgo.focus=\[Conformance\]"
# run all parallel-safe conformance tests in parallel
GINKGO_PARALLEL=y go run hack/e2e.go -v --test --test_args="--ginkgo.focus=\[Conformance\] --ginkgo.skip=\[Serial\]"
# ... and finish up with remaining tests in serial
go run hack/e2e.go -v --test --test_args="--ginkgo.focus=\[Serial\].*\[Conformance\]"
```
### Defining Conformance Subset
It is impossible to define the entire space of Conformance tests without knowing
the future, so instead, we define the complement of conformance tests, below
(`Please update this with companion PRs as necessary`):
- A conformance test cannot test cloud provider specific features (e.g. GCE
monitoring, S3 Bucketing, ...)
- A conformance test cannot rely on any particular non-standard file system
permissions granted to containers or users (e.g. sharing writable host /tmp with
a container)
- A conformance test cannot rely on any binaries that are not required for the
linux kernel or for a kubelet to run (e.g. git)
- A conformance test cannot test a feature which obviously cannot be supported
on a broad range of platforms (e.g. testing of multiple disk mounts, GPUs, high
density)
## Continuous Integration
A quick overview of how we run e2e CI on Kubernetes.
### What is CI?
We run a battery of `e2e` tests against `HEAD` of the master branch on a
continuous basis, and block merges via the [submit
queue](http://submit-queue.k8s.io/) on a subset of those tests if they fail (the
subset is defined in the [munger config](https://github.com/kubernetes/contrib/blob/master/mungegithub/mungers/submit-queue.go)
via the `jenkins-jobs` flag; note we also block on `kubernetes-build` and
`kubernetes-test-go` jobs for build and unit and integration tests).
CI results can be found at [ci-test.k8s.io](http://ci-test.k8s.io), e.g.
[ci-test.k8s.io/kubernetes-e2e-gce/10594](http://ci-test.k8s.io/kubernetes-e2e-gce/10594).
### What runs in CI?
We run all default tests (those that aren't marked `[Flaky]` or `[Feature:.+]`)
against GCE and GKE. To minimize the time from regression-to-green-run, we
partition tests across different jobs:
- `kubernetes-e2e-<provider>` runs all non-`[Slow]`, non-`[Serial]`,
non-`[Disruptive]`, non-`[Flaky]`, non-`[Feature:.+]` tests in parallel.
- `kubernetes-e2e-<provider>-slow` runs all `[Slow]`, non-`[Serial]`,
non-`[Disruptive]`, non-`[Flaky]`, non-`[Feature:.+]` tests in parallel.
- `kubernetes-e2e-<provider>-serial` runs all `[Serial]` and `[Disruptive]`,
non-`[Flaky]`, non-`[Feature:.+]` tests in serial.
We also run non-default tests if the tests exercise general-availability ("GA")
features that require a special environment to run in, e.g.
`kubernetes-e2e-gce-scalability` and `kubernetes-kubemark-gce`, which test for
Kubernetes performance.
#### Non-default tests
We don't run many `[Feature:.+]` tests in CI. These tests are for features that
are experimental (often in the `experimental` API) and aren't enabled by
default.
### The PR-builder
We also run a battery of tests against every PR before we merge it. These tests
are equivalent to `kubernetes-e2e-gce`: it runs all non-`[Slow]`, non-`[Serial]`,
non-`[Disruptive]`, non-`[Flaky]`, non-`[Feature:.+]` tests in parallel. These
tests are considered "smoke tests" to give a decent signal that the PR doesn't
break most functionality. Results for your PR can be found at
[pr-test.k8s.io](http://pr-test.k8s.io), e.g.
[pr-test.k8s.io/20354](http://pr-test.k8s.io/20354) for #20354.
### Adding a test to CI
As mentioned above, prior to adding a new test, it is a good idea to perform a
`-ginkgo.dryRun=true` on the system, in order to see if a behavior is already
being tested, or to determine if it may be possible to augment an existing set
of tests for a specific use case.
If a behavior does not currently have coverage and a developer wishes to add a
new e2e test, navigate to the ./test/e2e directory and create a new test using
the existing suite as a guide.
TODO(#20357): Create a self-documented example which has been disabled, but can
be copied to create new tests and outlines the capabilities and libraries used.
When writing a test, consult [Kinds of tests](#kinds-of-tests) above to determine how your test
should be marked (e.g. `[Slow]`, `[Serial]`; remember, by default we assume a
test can run in parallel with other tests!).
When first adding a test it should *not* go straight into CI, because failures
block ordinary development. A test should only be added to CI after it has been
running in some non-CI suite long enough to establish a track record showing
that the test does not fail when run against *working* software. Note also that
tests running in CI are generally running on a well-loaded cluster, so must
contend for resources; see above about [kinds of tests](#kinds-of-tests).
Generally, a feature starts as `experimental`, and will be run in some suite
owned by the team developing the feature. If a feature is in beta or GA, it
*should* block the merge-queue. In moving from experimental to beta or GA, tests
that are expected to pass by default should simply remove the `[Feature:.+]`
label, and will be incorporated into our core suites. If tests are not expected
to pass by default, (e.g. they require a special environment such as added
quota,) they should remain with the `[Feature:.+]` label, and the suites that
run them should be incorporated into the
[munger config](https://github.com/kubernetes/contrib/blob/master/mungegithub/mungers/submit-queue.go)
via the `jenkins-jobs` flag.
Occasionally, we'll want to add tests to better exercise features that are
already GA. These tests also shouldn't go straight to CI. They should begin by
being marked as `[Flaky]` to be run outside of CI, and once a track-record for
them is established, they may be promoted out of `[Flaky]`.
### Moving a test out of CI
If we have determined that a test is known-flaky and cannot be fixed in the
short-term, we may move it out of CI indefinitely. This move should be used
sparingly, as it effectively means that we have no coverage of that test. When a
test is demoted, it should be marked `[Flaky]` with a comment accompanying the
label with a reference to an issue opened to fix the test.
## Performance Evaluation
Another benefit of the e2e tests is the ability to create reproducible loads on
the system, which can then be used to determine the responsiveness, or analyze
other characteristics of the system. For example, the density tests load the
system to 30, 50, and 100 pods per node and measure characteristics of
the system such as throughput, API latency, etc.
For a good overview of how we analyze performance data, please read the
following [post](http://blog.kubernetes.io/2015/09/kubernetes-performance-measurements-and.html).
For developers who are interested in doing their own performance analysis, we
recommend setting up [prometheus](http://prometheus.io/) for data collection,
and using [promdash](http://prometheus.io/docs/visualization/promdash/) to
visualize the data. You can also push your own metrics from the tests using a
[prom-push-gateway](http://prometheus.io/docs/instrumenting/pushing/).
Containers for all of these components can be found
[here](https://hub.docker.com/u/prom/).
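As a hedged illustration of the push-gateway approach, the sketch below uses
the Go client's push package; the gateway URL, job name, and metric name are
placeholders, and the builder-style push.New API assumes a recent
prometheus/client_golang release:

```
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	// Hypothetical metric recorded by a test run.
	podStartupLatency := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "e2e_pod_startup_latency_seconds",
		Help: "Pod startup latency observed by the e2e test.",
	})
	podStartupLatency.Set(1.5)

	// Push once at the end of the run; the gateway retains the sample
	// until the next push for the same job.
	if err := push.New("http://localhost:9091", "e2e_tests").
		Collector(podStartupLatency).
		Push(); err != nil {
		log.Fatalf("could not push to pushgateway: %v", err)
	}
}
```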
For more accurate measurements, you may wish to set up prometheus external to
kubernetes in an environment where it can access the major system components
(api-server, controller-manager, scheduler). This is especially useful when
attempting to gather metrics in a load-balanced api-server environment, because
all api-servers can be analyzed independently as well as collectively. On
startup, a configuration file is passed to prometheus that specifies the endpoints
that prometheus will scrape, as well as the sampling interval.
```
#prometheus.conf
job: {
name: "kubernetes"
scrape_interval: "1s"
target_group: {
# apiserver(s)
target: "http://localhost:8080/metrics"
# scheduler
target: "http://localhost:10251/metrics"
# controller-manager
target: "http://localhost:10252/metrics"
}
}
```
Once prometheus is scraping the kubernetes endpoints, the data can be
plotted using promdash, and alerts can be created against the assortment of
metrics that kubernetes provides.
## One More Thing
You should also know the [testing conventions](coding-conventions.md#testing-conventions).
**HAPPY TESTING!**
| docs/devel/e2e-tests.md | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00035474516334943473,
0.00017253799887839705,
0.0001621371484361589,
0.00017083768034353852,
0.000021326874048099853
] |
{
"id": 5,
"code_window": [
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = strategy\n",
"\t\t}\n",
"\t}\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1030
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package benchmark
import (
"testing"
"time"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 0 scheduled pods
func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
benchmarkScheduling(100, 0, b)
}
// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 1000 scheduled pods
func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
benchmarkScheduling(100, 1000, b)
}
// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 0 scheduled pods
func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
benchmarkScheduling(1000, 0, b)
}
// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 1000 scheduled pods
func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
benchmarkScheduling(1000, 1000, b)
}
// benchmarkScheduling benchmarks scheduling rate with specific number of nodes
// and specific number of pods already scheduled. Since an operation takes relatively
// long time, b.N should be small: 10 - 100.
func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
schedulerConfigFactory, finalFunc := mustSetupScheduler()
defer finalFunc()
c := schedulerConfigFactory.Client
nodePreparer := framework.NewIntegrationTestNodePreparer(
c,
map[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},
"scheduler-perf-",
)
if err := nodePreparer.PrepareNodes(); err != nil {
glog.Fatalf("%v", err)
}
defer nodePreparer.CleanupNodes()
makePodsFromRC(c, "rc1", numScheduledPods)
for {
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
if len(scheduled) >= numScheduledPods {
break
}
time.Sleep(1 * time.Second)
}
// start benchmark
b.ResetTimer()
makePodsFromRC(c, "rc2", b.N)
for {
// This can potentially affect performance of scheduler, since List() is done under mutex.
// TODO: Setup watch on apiserver and wait until all pods scheduled.
scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
if len(scheduled) >= numScheduledPods+b.N {
break
}
// Note: This might introduce slight deviation in accuracy of benchmark results.
// Since the total amount of time is relatively large, it might not be a concern.
time.Sleep(100 * time.Millisecond)
}
}
| test/integration/scheduler_perf/scheduler_bench_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0022265170700848103,
0.0003776534867938608,
0.00016639157547615469,
0.00017290719551965594,
0.0006163005600683391
] |
{
"id": 5,
"code_window": [
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = strategy\n",
"\t\t}\n",
"\t}\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1030
} | // Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mvcc
import (
"math"
"github.com/coreos/etcd/mvcc/mvccpb"
"github.com/coreos/etcd/pkg/adt"
)
var (
// watchBatchMaxRevs is the maximum distinct revisions that
// may be sent to an unsynced watcher at a time. Declared as
// var instead of const for testing purposes.
watchBatchMaxRevs = 1000
)
type eventBatch struct {
// evs is a batch of revision-ordered events
evs []mvccpb.Event
// revs is the minimum unique revisions observed for this batch
revs int
// moreRev is first revision with more events following this batch
moreRev int64
}
func (eb *eventBatch) add(ev mvccpb.Event) {
if eb.revs > watchBatchMaxRevs {
// maxed out batch size
return
}
if len(eb.evs) == 0 {
// base case
eb.revs = 1
eb.evs = append(eb.evs, ev)
return
}
// revision accounting
ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
evRev := ev.Kv.ModRevision
if evRev > ebRev {
eb.revs++
if eb.revs > watchBatchMaxRevs {
eb.moreRev = evRev
return
}
}
eb.evs = append(eb.evs, ev)
}
type watcherBatch map[*watcher]*eventBatch
func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
eb := wb[w]
if eb == nil {
eb = &eventBatch{}
wb[w] = eb
}
eb.add(ev)
}
// newWatcherBatch maps watchers to their matched events. It enables quick
// events look up by watcher.
func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
wb := make(watcherBatch)
for _, ev := range evs {
for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
if ev.Kv.ModRevision >= w.minRev {
// don't double notify
wb.add(w, ev)
}
}
}
return wb
}
type watcherSet map[*watcher]struct{}
func (w watcherSet) add(wa *watcher) {
if _, ok := w[wa]; ok {
panic("add watcher twice!")
}
w[wa] = struct{}{}
}
func (w watcherSet) union(ws watcherSet) {
for wa := range ws {
w.add(wa)
}
}
func (w watcherSet) delete(wa *watcher) {
if _, ok := w[wa]; !ok {
panic("removing missing watcher!")
}
delete(w, wa)
}
type watcherSetByKey map[string]watcherSet
func (w watcherSetByKey) add(wa *watcher) {
set := w[string(wa.key)]
if set == nil {
set = make(watcherSet)
w[string(wa.key)] = set
}
set.add(wa)
}
func (w watcherSetByKey) delete(wa *watcher) bool {
k := string(wa.key)
if v, ok := w[k]; ok {
if _, ok := v[wa]; ok {
delete(v, wa)
if len(v) == 0 {
// remove the set; nothing left
delete(w, k)
}
return true
}
}
return false
}
// watcherGroup is a collection of watchers organized by their ranges
type watcherGroup struct {
// keyWatchers has the watchers that watch on a single key
keyWatchers watcherSetByKey
// ranges has the watchers that watch a range; it is sorted by interval
ranges adt.IntervalTree
// watchers is the set of all watchers
watchers watcherSet
}
func newWatcherGroup() watcherGroup {
return watcherGroup{
keyWatchers: make(watcherSetByKey),
watchers: make(watcherSet),
}
}
// add puts a watcher in the group.
func (wg *watcherGroup) add(wa *watcher) {
wg.watchers.add(wa)
if wa.end == nil {
wg.keyWatchers.add(wa)
return
}
// interval already registered?
ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
if iv := wg.ranges.Find(ivl); iv != nil {
iv.Val.(watcherSet).add(wa)
return
}
// not registered, put in interval tree
ws := make(watcherSet)
ws.add(wa)
wg.ranges.Insert(ivl, ws)
}
// contains is whether the given key has a watcher in the group.
func (wg *watcherGroup) contains(key string) bool {
_, ok := wg.keyWatchers[key]
return ok || wg.ranges.Contains(adt.NewStringAffinePoint(key))
}
// size gives the number of unique watchers in the group.
func (wg *watcherGroup) size() int { return len(wg.watchers) }
// delete removes a watcher from the group.
func (wg *watcherGroup) delete(wa *watcher) bool {
if _, ok := wg.watchers[wa]; !ok {
return false
}
wg.watchers.delete(wa)
if wa.end == nil {
wg.keyWatchers.delete(wa)
return true
}
ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
iv := wg.ranges.Find(ivl)
if iv == nil {
return false
}
ws := iv.Val.(watcherSet)
delete(ws, wa)
if len(ws) == 0 {
// remove interval missing watchers
if ok := wg.ranges.Delete(ivl); !ok {
panic("could not remove watcher from interval tree")
}
}
return true
}
// choose selects watchers from the watcher group to update
func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
if len(wg.watchers) < maxWatchers {
return wg, wg.chooseAll(curRev, compactRev)
}
ret := newWatcherGroup()
for w := range wg.watchers {
if maxWatchers <= 0 {
break
}
maxWatchers--
ret.add(w)
}
return &ret, ret.chooseAll(curRev, compactRev)
}
func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
minRev := int64(math.MaxInt64)
for w := range wg.watchers {
if w.minRev > curRev {
panic("watcher current revision should not exceed current revision")
}
if w.minRev < compactRev {
select {
case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}:
w.compacted = true
wg.delete(w)
default:
// retry next time
}
continue
}
if minRev > w.minRev {
minRev = w.minRev
}
}
return minRev
}
// watcherSetByKey gets the set of watchers that receive events on the given key.
func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
wkeys := wg.keyWatchers[key]
wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
// zero-copy cases
switch {
case len(wranges) == 0:
// no need to merge ranges or copy; reuse single-key set
return wkeys
case len(wranges) == 0 && len(wkeys) == 0:
return nil
case len(wranges) == 1 && len(wkeys) == 0:
return wranges[0].Val.(watcherSet)
}
// copy case
ret := make(watcherSet)
ret.union(wg.keyWatchers[key])
for _, item := range wranges {
ret.union(item.Val.(watcherSet))
}
return ret
}
| vendor/github.com/coreos/etcd/mvcc/watcher_group.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00036487466422840953,
0.0001795594725990668,
0.00016482638602610677,
0.0001732761156745255,
0.00003586017191992141
] |
{
"id": 5,
"code_window": [
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = strategy\n",
"\t\t}\n",
"\t}\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1030
} | // This file was generated by go generate; DO NOT EDIT
package bidi
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint
const (
L Class = iota // LeftToRight
R // RightToLeft
EN // EuropeanNumber
ES // EuropeanSeparator
ET // EuropeanTerminator
AN // ArabicNumber
CS // CommonSeparator
B // ParagraphSeparator
S // SegmentSeparator
WS // WhiteSpace
ON // OtherNeutral
BN // BoundaryNeutral
NSM // NonspacingMark
AL // ArabicLetter
Control // Control LRO - PDI
numClass
LRO // LeftToRightOverride
RLO // RightToLeftOverride
LRE // LeftToRightEmbedding
RLE // RightToLeftEmbedding
PDF // PopDirectionalFormat
LRI // LeftToRightIsolate
RLI // RightToLeftIsolate
FSI // FirstStrongIsolate
PDI // PopDirectionalIsolate
unknownClass = ^Class(0)
)
var controlToClass = map[rune]Class{
0x202D: LRO, // LeftToRightOverride,
0x202E: RLO, // RightToLeftOverride,
0x202A: LRE, // LeftToRightEmbedding,
0x202B: RLE, // RightToLeftEmbedding,
0x202C: PDF, // PopDirectionalFormat,
0x2066: LRI, // LeftToRightIsolate,
0x2067: RLI, // RightToLeftIsolate,
0x2068: FSI, // FirstStrongIsolate,
0x2069: PDI, // PopDirectionalIsolate,
}
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
// 3..0 Class type
const (
openMask = 0x10
xorMaskShift = 5
)
| staging/src/k8s.io/client-go/_vendor/golang.org/x/text/unicode/bidi/trieval.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017912442854139954,
0.0001703714078757912,
0.00016598084766883403,
0.00016781027079559863,
0.0000046981681407487486
] |
{
"id": 5,
"code_window": [
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = strategy\n",
"\t\t}\n",
"\t}\n",
"\treturn nil\n",
"}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tp.nodeToAppliedStrategy[nodes.Items[index].Name] = v.Strategy\n"
],
"file_path": "test/e2e/framework/util.go",
"type": "replace",
"edit_start_line_idx": 1030
} | apiVersion: v1
kind: ReplicationController
metadata:
name: cassandra
# The labels will be applied automatically
# from the labels in the pod template, if not set
# labels:
# app: cassandra
spec:
replicas: 2
# The selector will be applied automatically
# from the labels in the pod template, if not set.
# selector:
# app: cassandra
template:
metadata:
labels:
app: cassandra
spec:
containers:
- command:
- /run.sh
resources:
limits:
cpu: 0.5
env:
- name: MAX_HEAP_SIZE
value: 512M
- name: HEAP_NEWSIZE
value: 100M
- name: CASSANDRA_SEED_PROVIDER
value: "io.k8s.cassandra.KubernetesSeedProvider"
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
image: gcr.io/google-samples/cassandra:v11
name: cassandra
ports:
- containerPort: 7000
name: intra-node
- containerPort: 7001
name: tls-intra-node
- containerPort: 7199
name: jmx
- containerPort: 9042
name: cql
volumeMounts:
- mountPath: /cassandra_data
name: data
volumes:
- name: data
emptyDir: {}
| examples/storage/cassandra/cassandra-controller.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017656169075053185,
0.00017426312842871994,
0.0001707729243207723,
0.00017460243543609977,
0.0000019346384760865476
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"type IntegrationTestNodePreparer struct {\n",
"\tclient clientset.Interface\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeNamePrefix string\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 34
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
retries = 5
)
type IntegrationTestNodePreparer struct {
client clientset.Interface
countToStrategy map[int]testutils.PrepareNodeStrategy
nodeNamePrefix string
}
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
return &IntegrationTestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeNamePrefix: nodeNamePrefix,
}
}
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes := 0
for k := range p.countToStrategy {
numNodes += k
}
glog.Infof("Making %d nodes", numNodes)
baseNode := &api.Node{
ObjectMeta: api.ObjectMeta{
GenerateName: p.nodeNamePrefix,
},
Spec: api.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
api.ResourceCPU: resource.MustParse("4"),
api.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: api.NodeRunning,
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
},
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
index := 0
sum := 0
for k, strategy := range p.countToStrategy {
sum += k
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
}
}
return nil
}
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
}
| test/integration/framework/perf_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.9990798234939575,
0.2738654911518097,
0.00016415867139585316,
0.00017888240108732134,
0.4433837831020355
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"type IntegrationTestNodePreparer struct {\n",
"\tclient clientset.Interface\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeNamePrefix string\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 34
} | // Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.2
package language
import "sort"
func sortStable(s sort.Interface) {
ss := stableSort{
s: s,
pos: make([]int, s.Len()),
}
for i := range ss.pos {
ss.pos[i] = i
}
sort.Sort(&ss)
}
type stableSort struct {
s sort.Interface
pos []int
}
func (s *stableSort) Len() int {
return len(s.pos)
}
func (s *stableSort) Less(i, j int) bool {
return s.s.Less(i, j) || !s.s.Less(j, i) && s.pos[i] < s.pos[j]
}
func (s *stableSort) Swap(i, j int) {
s.s.Swap(i, j)
s.pos[i], s.pos[j] = s.pos[j], s.pos[i]
}
| staging/src/k8s.io/client-go/_vendor/golang.org/x/text/language/go1_1.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00018032520893029869,
0.00017311741248704493,
0.00016674003563821316,
0.0001727021881379187,
0.000004967932909494266
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"type IntegrationTestNodePreparer struct {\n",
"\tclient clientset.Interface\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeNamePrefix string\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 34
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Downward API volume", func() {
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
f := framework.NewDefaultFramework("downward-api")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should provide podname only [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should set DefaultMode on files [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should set mode on item file [Conformance]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should provide podname as non-root with fsgroup [Feature:FSGroup]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod.Spec.SecurityContext = &api.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should update labels on modification [Conformance]", func() {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
podName := "labelsupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.Client, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
//modify labels
podClient.Update(podName, func(pod *api.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
})
It("should update annotations on modification [Conformance]", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name)
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
//modify annotations
podClient.Update(podName, func(pod *api.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.Client, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
})
It("should provide container's cpu limit", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
It("should provide container's memory limit", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
It("should provide container's cpu request", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
It("should provide container's memory request", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
It("should provide node allocatable (cpu) as default cpu limit if the limit is not set", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
It("should provide node allocatable (memory) as default memory limit if the limit is not set", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *api.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []api.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_mode=" + filePath},
VolumeMounts: []api.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].Mode = itemMode
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode = defaultMode
}
return pod
}
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *api.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []api.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []api.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
return pod
}
func downwardAPIVolumeForContainerResources(name string, filePath string) *api.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
return pod
}
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *api.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
return pod
}
func downwardAPIVolumeBaseContainers(name, filePath string) []api.Container {
return []api.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("250m"),
api.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: api.ResourceList{
api.ResourceCPU: resource.MustParse("1250m"),
api.ResourceMemory: resource.MustParse("64Mi"),
},
},
VolumeMounts: []api.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
}
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []api.Container {
return []api.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []api.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
}
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *api.Pod {
pod := downwardAPIVolumeBasePod(name, labels, annotations)
pod.Spec.Containers = []api.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []api.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations, pod)
return pod
}
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *api.Pod {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: "podinfo",
VolumeSource: api.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{
Items: []api.DownwardAPIVolumeFile{
{
Path: "podname",
FieldRef: &api.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
{
Path: "cpu_limit",
ResourceFieldRef: &api.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.cpu",
},
},
{
Path: "cpu_request",
ResourceFieldRef: &api.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.cpu",
},
},
{
Path: "memory_limit",
ResourceFieldRef: &api.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.memory",
},
},
{
Path: "memory_request",
ResourceFieldRef: &api.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.memory",
},
},
},
},
},
},
},
RestartPolicy: api.RestartPolicyNever,
},
}
return pod
}
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *api.Pod) {
if len(labels) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, api.DownwardAPIVolumeFile{
Path: "labels",
FieldRef: &api.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
})
}
if len(annotations) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, api.DownwardAPIVolumeFile{
Path: "annotations",
FieldRef: &api.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations",
},
})
}
}
// TODO: add test-webserver example as pointed out in https://github.com/kubernetes/kubernetes/pull/5093#discussion-diff-37606771
| test/e2e/common/downwardapi_volume.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0001792172115528956,
0.00017129415937233716,
0.00016573302855249494,
0.00017165810277219862,
0.0000028352542358334176
] |
{
"id": 6,
"code_window": [
")\n",
"\n",
"type IntegrationTestNodePreparer struct {\n",
"\tclient clientset.Interface\n",
"\tcountToStrategy map[int]testutils.PrepareNodeStrategy\n",
"\tnodeNamePrefix string\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tcountToStrategy []testutils.CountToStrategy\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 34
} | // Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import "fmt"
// Returns a mask of all cores on the machine if the passed-in mask is empty.
func FixCpuMask(mask string, cores int) string {
if mask == "" {
if cores > 1 {
mask = fmt.Sprintf("0-%d", cores-1)
} else {
mask = "0"
}
}
return mask
}
| vendor/github.com/google/cadvisor/utils/utils.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017934164498001337,
0.00017448532162234187,
0.00016946195682976395,
0.00017465237760916352,
0.000004035095116705634
] |
{
"id": 7,
"code_window": [
"\tnodeNamePrefix string\n",
"}\n",
"\n",
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n",
"\treturn &IntegrationTestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n",
"\t\tnodeNamePrefix: nodeNamePrefix,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"math"
"os"
"time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
"github.com/golang/glog"
)
const (
// String used to mark pod deletion
nonExist = "NonExist"
)
type RCConfig struct {
Client *client.Client
Image string
Command []string
Name string
Namespace string
PollInterval time.Duration
Timeout time.Duration
PodStatusFile *os.File
Replicas int
CpuRequest int64 // millicores
CpuLimit int64 // millicores
MemRequest int64 // bytes
MemLimit int64 // bytes
ReadinessProbe *api.Probe
DNSPolicy *api.DNSPolicy
// Env vars, set the same for every pod.
Env map[string]string
// Extra labels added to every pod.
Labels map[string]string
// Node selector for pods in the RC.
NodeSelector map[string]string
// Ports to declare in the container (map of name to containerPort).
Ports map[string]int
// Ports to declare in the container as host and container ports.
HostPorts map[string]int
Volumes []api.Volume
VolumeMounts []api.VolumeMount
// Pointer to a list of pods; if non-nil, will be set to a list of pods
// created by this RC by RunRC.
CreatedPods *[]*api.Pod
// Maximum allowable container failures. If exceeded, RunRC returns an error.
// Defaults to replicas*0.1 if unspecified.
MaxContainerFailures *int
// If set to false starting RC will print progress, otherwise only errors will be printed.
Silent bool
// If set this function will be used to print log lines instead of glog.
LogFunc func(fmt string, args ...interface{})
// If set those functions will be used to gather data from Nodes - in integration tests where no
// kubelets are running those variables should be nil.
NodeDumpFunc func(c *client.Client, nodeNames []string, logFunc func(fmt string, args ...interface{}))
ContainerDumpFunc func(c *client.Client, ns string, logFunc func(ftm string, args ...interface{}))
}
func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
if rc.LogFunc != nil {
rc.LogFunc(fmt, args...)
}
glog.Infof(fmt, args...)
}
type DeploymentConfig struct {
RCConfig
}
type ReplicaSetConfig struct {
RCConfig
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
oldHostname string
oldPhase string
hostname string
phase string
}
// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// Print formats and prints the give PodDiff.
func (p PodDiff) String(ignorePhases sets.String) string {
ret := ""
for name, info := range p {
if ignorePhases.Has(info.phase) {
continue
}
if info.phase == nonExist {
ret += fmt.Sprintf("Pod %v was deleted, had phase %v and host %v\n", name, info.oldPhase, info.oldHostname)
continue
}
phaseChange, hostChange := false, false
msg := fmt.Sprintf("Pod %v ", name)
if info.oldPhase != info.phase {
phaseChange = true
if info.oldPhase == nonExist {
msg += fmt.Sprintf("in phase %v ", info.phase)
} else {
msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
}
}
if info.oldHostname != info.hostname {
hostChange = true
if info.oldHostname == nonExist || info.oldHostname == "" {
msg += fmt.Sprintf("assigned host %v ", info.hostname)
} else {
msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
}
}
if phaseChange || hostChange {
ret += msg + "\n"
}
}
return ret
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
podInfoMap := PodDiff{}
// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
for _, pod := range curPods {
podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.NodeName, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist}
}
// Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist.
for _, pod := range oldPods {
if info, ok := podInfoMap[pod.Name]; ok {
info.oldHostname, info.oldPhase = pod.Spec.NodeName, string(pod.Status.Phase)
} else {
podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.NodeName, oldPhase: string(pod.Status.Phase)}
}
}
return podInfoMap
}
// RunDeployment Launches (and verifies correctness) of a Deployment
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *DeploymentConfig) create() error {
deployment := &extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.DeploymentSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&deployment.Spec.Template)
_, err := config.Client.Deployments(config.Namespace).Create(deployment)
if err != nil {
return fmt.Errorf("Error creating deployment: %v", err)
}
config.RCConfigLog("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
return nil
}
// RunReplicaSet launches (and verifies correctness) of a ReplicaSet
// and waits until all the pods it launches to reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *ReplicaSetConfig) create() error {
rs := &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&rs.Spec.Template)
_, err := config.Client.ReplicaSets(config.Namespace).Create(rs)
if err != nil {
return fmt.Errorf("Error creating replica set: %v", err)
}
config.RCConfigLog("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, rs.Spec.Replicas)
return nil
}
// RunRC Launches (and verifies correctness) of a Replication Controller
// and will wait for all pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *RCConfig) create() error {
dnsDefault := api.DNSDefault
if config.DNSPolicy == nil {
config.DNSPolicy = &dnsDefault
}
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: api.ReplicationControllerSpec{
Replicas: int32(config.Replicas),
Selector: map[string]string{
"name": config.Name,
},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
ReadinessProbe: config.ReadinessProbe,
},
},
DNSPolicy: *config.DNSPolicy,
NodeSelector: config.NodeSelector,
},
},
},
}
config.applyTo(rc.Spec.Template)
_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
if err != nil {
return fmt.Errorf("Error creating replication controller: %v", err)
}
config.RCConfigLog("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
return nil
}
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
if config.Env != nil {
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
}
}
if config.Labels != nil {
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
}
if config.NodeSelector != nil {
template.Spec.NodeSelector = make(map[string]string)
for k, v := range config.NodeSelector {
template.Spec.NodeSelector[k] = v
}
}
if config.Ports != nil {
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
}
}
if config.HostPorts != nil {
for k, v := range config.HostPorts {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
}
if config.CpuLimit > 0 || config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
}
if config.CpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
}
if config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
if config.CpuRequest > 0 || config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
}
if config.CpuRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
}
if config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
}
if len(config.Volumes) > 0 {
template.Spec.Volumes = config.Volumes
}
if len(config.VolumeMounts) > 0 {
template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
}
}
type RCStartupStatus struct {
Expected int
Terminating int
Running int
RunningButNotReady int
Waiting int
Pending int
Unknown int
Inactive int
FailedContainers int
Created []*api.Pod
ContainerRestartNodes sets.String
}
func (s *RCStartupStatus) String(name string) string {
return fmt.Sprintf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
name, len(s.Created), s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}
func ComputeRCStartupStatus(pods []*api.Pod, expected int) RCStartupStatus {
startupStatus := RCStartupStatus{
Expected: expected,
Created: make([]*api.Pod, 0, expected),
ContainerRestartNodes: sets.NewString(),
}
for _, p := range pods {
if p.DeletionTimestamp != nil {
startupStatus.Terminating++
continue
}
startupStatus.Created = append(startupStatus.Created, p)
if p.Status.Phase == api.PodRunning {
ready := false
for _, c := range p.Status.Conditions {
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
ready = true
break
}
}
if ready {
// Only count a pod is running when it is also ready.
startupStatus.Running++
} else {
startupStatus.RunningButNotReady++
}
for _, v := range FailedContainers(p) {
startupStatus.FailedContainers = startupStatus.FailedContainers + v.Restarts
startupStatus.ContainerRestartNodes.Insert(p.Spec.NodeName)
}
} else if p.Status.Phase == api.PodPending {
if p.Spec.NodeName == "" {
startupStatus.Waiting++
} else {
startupStatus.Pending++
}
} else if p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed {
startupStatus.Inactive++
} else if p.Status.Phase == api.PodUnknown {
startupStatus.Unknown++
}
}
return startupStatus
}
func (config *RCConfig) start() error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
} else {
maxContainerFailures = *config.MaxContainerFailures
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
PodStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything())
defer PodStore.Stop()
interval := config.PollInterval
if interval <= 0 {
interval = 10 * time.Second
}
timeout := config.Timeout
if timeout <= 0 {
timeout = 5 * time.Minute
}
oldPods := make([]*api.Pod, 0)
oldRunning := 0
lastChange := time.Now()
for oldRunning != config.Replicas {
time.Sleep(interval)
pods := PodStore.List()
startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
pods = startupStatus.Created
if config.CreatedPods != nil {
*config.CreatedPods = pods
}
if !config.Silent {
config.RCConfigLog(startupStatus.String(config.Name))
}
if config.PodStatusFile != nil {
fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", startupStatus.Running, startupStatus.Pending, startupStatus.Waiting, startupStatus.Inactive, startupStatus.Unknown, startupStatus.RunningButNotReady)
}
if startupStatus.FailedContainers > maxContainerFailures {
if config.NodeDumpFunc != nil {
config.NodeDumpFunc(config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog)
}
if config.ContainerDumpFunc != nil {
// Get the logs from the failed containers to help diagnose what caused them to fail
config.ContainerDumpFunc(config.Client, config.Namespace, config.RCConfigLog)
}
return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
}
if len(pods) < len(oldPods) || len(pods) > config.Replicas {
// This failure mode includes:
// kubelet is dead, so node controller deleted pods and rc creates more
// - diagnose by noting the pod diff below.
// pod is unhealthy, so replication controller creates another to take its place
// - diagnose by comparing the previous "2 Pod states" lines for inactive pods
errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
config.RCConfigLog("%v, pods that changed since the last iteration:", errorStr)
config.RCConfigLog(Diff(oldPods, pods).String(sets.NewString()))
return fmt.Errorf(errorStr)
}
if len(pods) > len(oldPods) || startupStatus.Running > oldRunning {
lastChange = time.Now()
}
oldPods = pods
oldRunning = startupStatus.Running
if time.Since(lastChange) > timeout {
break
}
}
if oldRunning != config.Replicas {
// List only pods from a given replication controller.
options := api.ListOptions{LabelSelector: label}
if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
for _, pod := range pods.Items {
config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
}
} else {
config.RCConfigLog("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
}
return nil
}
// Simplified version of RunRC, that does not create RC, but creates plain Pods.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string,
pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
// no pod to start
if replicas < 1 {
panic("StartPods: number of replicas must be non-zero")
}
startPodsID := string(uuid.NewUUID()) // So that we can label and find them
for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Labels["name"] = podName
pod.ObjectMeta.Labels["startPodsID"] = startPodsID
pod.Spec.Containers[0].Name = podName
_, err := c.Pods(namespace).Create(&pod)
if err != nil {
return err
}
}
logFunc("Waiting for running...")
if waitForRunning {
label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
err := WaitForPodsWithLabelRunning(c, namespace, label)
if err != nil {
return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
}
}
return nil
}
// Wait up to 10 minutes for all matching pods to become Running and at least one
// matching pod exists.
func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
running := false
PodStore := NewPodStore(c, ns, label, fields.Everything())
defer PodStore.Stop()
waitLoop:
for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
pods := PodStore.List()
if len(pods) == 0 {
continue waitLoop
}
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
continue waitLoop
}
}
running = true
break
}
if !running {
return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
}
return nil
}
type TestNodePreparer interface {
PrepareNodes() error
CleanupNodes() error
}
type PrepareNodeStrategy interface {
PreparePatch(node *api.Node) []byte
CleanupNode(node *api.Node) *api.Node
}
type TrivialNodePrepareStrategy struct{}
func (*TrivialNodePrepareStrategy) PreparePatch(*api.Node) []byte {
return []byte{}
}
func (*TrivialNodePrepareStrategy) CleanupNode(node *api.Node) *api.Node {
nodeCopy := *node
return &nodeCopy
}
func DoPrepareNode(client clientset.Interface, node *api.Node, strategy PrepareNodeStrategy) error {
var err error
patch := strategy.PreparePatch(node)
if len(patch) == 0 {
return nil
}
for attempt := 0; attempt < retries; attempt++ {
if _, err = client.Core().Nodes().Patch(node.Name, api.MergePatchType, []byte(patch)); err == nil {
return nil
}
if !apierrs.IsConflict(err) {
return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("To many conflicts when applying patch %v to Node %v", string(patch), node.Name)
}
func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
for attempt := 0; attempt < retries; attempt++ {
node, err := client.Core().Nodes().Get(nodeName)
if err != nil {
return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
}
updatedNode := strategy.CleanupNode(node)
if api.Semantic.DeepEqual(node, updatedNode) {
return nil
}
if _, err = client.Core().Nodes().Update(updatedNode); err == nil {
return nil
}
if !apierrs.IsConflict(err) {
return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("To many conflicts when trying to cleanup Node %v", nodeName)
}
| test/utils/runners.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.017485572025179863,
0.001009131083264947,
0.00016691024939063936,
0.00017926658620126545,
0.002743887947872281
] |
{
"id": 7,
"code_window": [
"\tnodeNamePrefix string\n",
"}\n",
"\n",
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n",
"\treturn &IntegrationTestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n",
"\t\tnodeNamePrefix: nodeNamePrefix,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 38
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
type PodDisruptionBudgetExpansion interface{}
| pkg/client/clientset_generated/release_1_4/typed/policy/v1alpha1/generated_expansion.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017345513333566487,
0.00017007517453748733,
0.0001666952157393098,
0.00017007517453748733,
0.0000033799587981775403
] |
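The after_edit above replaces the map[int]testutils.PrepareNodeStrategy parameter with a []testutils.CountToStrategy slice, which keeps the strategies ordered and allows two entries with the same node count. The element type itself is not shown in this record; judging from how the later fragments use it (fields named Count and Strategy), it is presumably declared along these lines:

// CountToStrategy pairs a node count with the strategy applied to that
// many nodes (sketch inferred from the edit fragments, not quoted source).
type CountToStrategy struct {
	Count    int
	Strategy PrepareNodeStrategy
}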
{
"id": 7,
"code_window": [
"\tnodeNamePrefix string\n",
"}\n",
"\n",
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n",
"\treturn &IntegrationTestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n",
"\t\tnodeNamePrefix: nodeNamePrefix,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 38
} | // mksysnum_linux.pl /usr/include/asm/unistd.h
// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
// +build mips64,linux
package unix
const (
SYS_READ = 5000
SYS_WRITE = 5001
SYS_OPEN = 5002
SYS_CLOSE = 5003
SYS_STAT = 5004
SYS_FSTAT = 5005
SYS_LSTAT = 5006
SYS_POLL = 5007
SYS_LSEEK = 5008
SYS_MMAP = 5009
SYS_MPROTECT = 5010
SYS_MUNMAP = 5011
SYS_BRK = 5012
SYS_RT_SIGACTION = 5013
SYS_RT_SIGPROCMASK = 5014
SYS_IOCTL = 5015
SYS_PREAD64 = 5016
SYS_PWRITE64 = 5017
SYS_READV = 5018
SYS_WRITEV = 5019
SYS_ACCESS = 5020
SYS_PIPE = 5021
SYS__NEWSELECT = 5022
SYS_SCHED_YIELD = 5023
SYS_MREMAP = 5024
SYS_MSYNC = 5025
SYS_MINCORE = 5026
SYS_MADVISE = 5027
SYS_SHMGET = 5028
SYS_SHMAT = 5029
SYS_SHMCTL = 5030
SYS_DUP = 5031
SYS_DUP2 = 5032
SYS_PAUSE = 5033
SYS_NANOSLEEP = 5034
SYS_GETITIMER = 5035
SYS_SETITIMER = 5036
SYS_ALARM = 5037
SYS_GETPID = 5038
SYS_SENDFILE = 5039
SYS_SOCKET = 5040
SYS_CONNECT = 5041
SYS_ACCEPT = 5042
SYS_SENDTO = 5043
SYS_RECVFROM = 5044
SYS_SENDMSG = 5045
SYS_RECVMSG = 5046
SYS_SHUTDOWN = 5047
SYS_BIND = 5048
SYS_LISTEN = 5049
SYS_GETSOCKNAME = 5050
SYS_GETPEERNAME = 5051
SYS_SOCKETPAIR = 5052
SYS_SETSOCKOPT = 5053
SYS_GETSOCKOPT = 5054
SYS_CLONE = 5055
SYS_FORK = 5056
SYS_EXECVE = 5057
SYS_EXIT = 5058
SYS_WAIT4 = 5059
SYS_KILL = 5060
SYS_UNAME = 5061
SYS_SEMGET = 5062
SYS_SEMOP = 5063
SYS_SEMCTL = 5064
SYS_SHMDT = 5065
SYS_MSGGET = 5066
SYS_MSGSND = 5067
SYS_MSGRCV = 5068
SYS_MSGCTL = 5069
SYS_FCNTL = 5070
SYS_FLOCK = 5071
SYS_FSYNC = 5072
SYS_FDATASYNC = 5073
SYS_TRUNCATE = 5074
SYS_FTRUNCATE = 5075
SYS_GETDENTS = 5076
SYS_GETCWD = 5077
SYS_CHDIR = 5078
SYS_FCHDIR = 5079
SYS_RENAME = 5080
SYS_MKDIR = 5081
SYS_RMDIR = 5082
SYS_CREAT = 5083
SYS_LINK = 5084
SYS_UNLINK = 5085
SYS_SYMLINK = 5086
SYS_READLINK = 5087
SYS_CHMOD = 5088
SYS_FCHMOD = 5089
SYS_CHOWN = 5090
SYS_FCHOWN = 5091
SYS_LCHOWN = 5092
SYS_UMASK = 5093
SYS_GETTIMEOFDAY = 5094
SYS_GETRLIMIT = 5095
SYS_GETRUSAGE = 5096
SYS_SYSINFO = 5097
SYS_TIMES = 5098
SYS_PTRACE = 5099
SYS_GETUID = 5100
SYS_SYSLOG = 5101
SYS_GETGID = 5102
SYS_SETUID = 5103
SYS_SETGID = 5104
SYS_GETEUID = 5105
SYS_GETEGID = 5106
SYS_SETPGID = 5107
SYS_GETPPID = 5108
SYS_GETPGRP = 5109
SYS_SETSID = 5110
SYS_SETREUID = 5111
SYS_SETREGID = 5112
SYS_GETGROUPS = 5113
SYS_SETGROUPS = 5114
SYS_SETRESUID = 5115
SYS_GETRESUID = 5116
SYS_SETRESGID = 5117
SYS_GETRESGID = 5118
SYS_GETPGID = 5119
SYS_SETFSUID = 5120
SYS_SETFSGID = 5121
SYS_GETSID = 5122
SYS_CAPGET = 5123
SYS_CAPSET = 5124
SYS_RT_SIGPENDING = 5125
SYS_RT_SIGTIMEDWAIT = 5126
SYS_RT_SIGQUEUEINFO = 5127
SYS_RT_SIGSUSPEND = 5128
SYS_SIGALTSTACK = 5129
SYS_UTIME = 5130
SYS_MKNOD = 5131
SYS_PERSONALITY = 5132
SYS_USTAT = 5133
SYS_STATFS = 5134
SYS_FSTATFS = 5135
SYS_SYSFS = 5136
SYS_GETPRIORITY = 5137
SYS_SETPRIORITY = 5138
SYS_SCHED_SETPARAM = 5139
SYS_SCHED_GETPARAM = 5140
SYS_SCHED_SETSCHEDULER = 5141
SYS_SCHED_GETSCHEDULER = 5142
SYS_SCHED_GET_PRIORITY_MAX = 5143
SYS_SCHED_GET_PRIORITY_MIN = 5144
SYS_SCHED_RR_GET_INTERVAL = 5145
SYS_MLOCK = 5146
SYS_MUNLOCK = 5147
SYS_MLOCKALL = 5148
SYS_MUNLOCKALL = 5149
SYS_VHANGUP = 5150
SYS_PIVOT_ROOT = 5151
SYS__SYSCTL = 5152
SYS_PRCTL = 5153
SYS_ADJTIMEX = 5154
SYS_SETRLIMIT = 5155
SYS_CHROOT = 5156
SYS_SYNC = 5157
SYS_ACCT = 5158
SYS_SETTIMEOFDAY = 5159
SYS_MOUNT = 5160
SYS_UMOUNT2 = 5161
SYS_SWAPON = 5162
SYS_SWAPOFF = 5163
SYS_REBOOT = 5164
SYS_SETHOSTNAME = 5165
SYS_SETDOMAINNAME = 5166
SYS_CREATE_MODULE = 5167
SYS_INIT_MODULE = 5168
SYS_DELETE_MODULE = 5169
SYS_GET_KERNEL_SYMS = 5170
SYS_QUERY_MODULE = 5171
SYS_QUOTACTL = 5172
SYS_NFSSERVCTL = 5173
SYS_GETPMSG = 5174
SYS_PUTPMSG = 5175
SYS_AFS_SYSCALL = 5176
SYS_RESERVED177 = 5177
SYS_GETTID = 5178
SYS_READAHEAD = 5179
SYS_SETXATTR = 5180
SYS_LSETXATTR = 5181
SYS_FSETXATTR = 5182
SYS_GETXATTR = 5183
SYS_LGETXATTR = 5184
SYS_FGETXATTR = 5185
SYS_LISTXATTR = 5186
SYS_LLISTXATTR = 5187
SYS_FLISTXATTR = 5188
SYS_REMOVEXATTR = 5189
SYS_LREMOVEXATTR = 5190
SYS_FREMOVEXATTR = 5191
SYS_TKILL = 5192
SYS_RESERVED193 = 5193
SYS_FUTEX = 5194
SYS_SCHED_SETAFFINITY = 5195
SYS_SCHED_GETAFFINITY = 5196
SYS_CACHEFLUSH = 5197
SYS_CACHECTL = 5198
SYS_SYSMIPS = 5199
SYS_IO_SETUP = 5200
SYS_IO_DESTROY = 5201
SYS_IO_GETEVENTS = 5202
SYS_IO_SUBMIT = 5203
SYS_IO_CANCEL = 5204
SYS_EXIT_GROUP = 5205
SYS_LOOKUP_DCOOKIE = 5206
SYS_EPOLL_CREATE = 5207
SYS_EPOLL_CTL = 5208
SYS_EPOLL_WAIT = 5209
SYS_REMAP_FILE_PAGES = 5210
SYS_RT_SIGRETURN = 5211
SYS_SET_TID_ADDRESS = 5212
SYS_RESTART_SYSCALL = 5213
SYS_SEMTIMEDOP = 5214
SYS_FADVISE64 = 5215
SYS_TIMER_CREATE = 5216
SYS_TIMER_SETTIME = 5217
SYS_TIMER_GETTIME = 5218
SYS_TIMER_GETOVERRUN = 5219
SYS_TIMER_DELETE = 5220
SYS_CLOCK_SETTIME = 5221
SYS_CLOCK_GETTIME = 5222
SYS_CLOCK_GETRES = 5223
SYS_CLOCK_NANOSLEEP = 5224
SYS_TGKILL = 5225
SYS_UTIMES = 5226
SYS_MBIND = 5227
SYS_GET_MEMPOLICY = 5228
SYS_SET_MEMPOLICY = 5229
SYS_MQ_OPEN = 5230
SYS_MQ_UNLINK = 5231
SYS_MQ_TIMEDSEND = 5232
SYS_MQ_TIMEDRECEIVE = 5233
SYS_MQ_NOTIFY = 5234
SYS_MQ_GETSETATTR = 5235
SYS_VSERVER = 5236
SYS_WAITID = 5237
SYS_ADD_KEY = 5239
SYS_REQUEST_KEY = 5240
SYS_KEYCTL = 5241
SYS_SET_THREAD_AREA = 5242
SYS_INOTIFY_INIT = 5243
SYS_INOTIFY_ADD_WATCH = 5244
SYS_INOTIFY_RM_WATCH = 5245
SYS_MIGRATE_PAGES = 5246
SYS_OPENAT = 5247
SYS_MKDIRAT = 5248
SYS_MKNODAT = 5249
SYS_FCHOWNAT = 5250
SYS_FUTIMESAT = 5251
SYS_NEWFSTATAT = 5252
SYS_UNLINKAT = 5253
SYS_RENAMEAT = 5254
SYS_LINKAT = 5255
SYS_SYMLINKAT = 5256
SYS_READLINKAT = 5257
SYS_FCHMODAT = 5258
SYS_FACCESSAT = 5259
SYS_PSELECT6 = 5260
SYS_PPOLL = 5261
SYS_UNSHARE = 5262
SYS_SPLICE = 5263
SYS_SYNC_FILE_RANGE = 5264
SYS_TEE = 5265
SYS_VMSPLICE = 5266
SYS_MOVE_PAGES = 5267
SYS_SET_ROBUST_LIST = 5268
SYS_GET_ROBUST_LIST = 5269
SYS_KEXEC_LOAD = 5270
SYS_GETCPU = 5271
SYS_EPOLL_PWAIT = 5272
SYS_IOPRIO_SET = 5273
SYS_IOPRIO_GET = 5274
SYS_UTIMENSAT = 5275
SYS_SIGNALFD = 5276
SYS_TIMERFD = 5277
SYS_EVENTFD = 5278
SYS_FALLOCATE = 5279
SYS_TIMERFD_CREATE = 5280
SYS_TIMERFD_GETTIME = 5281
SYS_TIMERFD_SETTIME = 5282
SYS_SIGNALFD4 = 5283
SYS_EVENTFD2 = 5284
SYS_EPOLL_CREATE1 = 5285
SYS_DUP3 = 5286
SYS_PIPE2 = 5287
SYS_INOTIFY_INIT1 = 5288
SYS_PREADV = 5289
SYS_PWRITEV = 5290
SYS_RT_TGSIGQUEUEINFO = 5291
SYS_PERF_EVENT_OPEN = 5292
SYS_ACCEPT4 = 5293
SYS_RECVMMSG = 5294
SYS_FANOTIFY_INIT = 5295
SYS_FANOTIFY_MARK = 5296
SYS_PRLIMIT64 = 5297
SYS_NAME_TO_HANDLE_AT = 5298
SYS_OPEN_BY_HANDLE_AT = 5299
SYS_CLOCK_ADJTIME = 5300
SYS_SYNCFS = 5301
SYS_SENDMMSG = 5302
SYS_SETNS = 5303
SYS_PROCESS_VM_READV = 5304
SYS_PROCESS_VM_WRITEV = 5305
SYS_KCMP = 5306
SYS_FINIT_MODULE = 5307
SYS_GETDENTS64 = 5308
SYS_SCHED_SETATTR = 5309
SYS_SCHED_GETATTR = 5310
SYS_RENAMEAT2 = 5311
SYS_SECCOMP = 5312
SYS_GETRANDOM = 5313
SYS_MEMFD_CREATE = 5314
SYS_BPF = 5315
SYS_EXECVEAT = 5316
SYS_USERFAULTFD = 5317
SYS_MEMBARRIER = 5318
)
| staging/src/k8s.io/client-go/_vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00034129765117540956,
0.00020281826436985284,
0.00016923573275562376,
0.00018931642989628017,
0.00003502827894408256
] |
{
"id": 7,
"code_window": [
"\tnodeNamePrefix string\n",
"}\n",
"\n",
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n",
"\treturn &IntegrationTestNodePreparer{\n",
"\t\tclient: client,\n",
"\t\tcountToStrategy: countToStrategy,\n",
"\t\tnodeNamePrefix: nodeNamePrefix,\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeNamePrefix string) testutils.TestNodePreparer {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 38
} | package libcontainer
// NewConsole returns an initialized console that can be used within a container
func NewConsole(uid, gid int) (Console, error) {
return &windowsConsole{}, nil
}
// windowsConsole is a Windows pseudo-TTY for use within a container.
type windowsConsole struct {
}
func (c *windowsConsole) Fd() uintptr {
return 0
}
func (c *windowsConsole) Path() string {
return ""
}
func (c *windowsConsole) Read(b []byte) (int, error) {
return 0, nil
}
func (c *windowsConsole) Write(b []byte) (int, error) {
return 0, nil
}
func (c *windowsConsole) Close() error {
return nil
}
| vendor/github.com/opencontainers/runc/libcontainer/console_windows.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0033519412390887737,
0.0014877133071422577,
0.00016730109928175807,
0.0012158052995800972,
0.0013746306067332625
] |
{
"id": 8,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (p *IntegrationTestNodePreparer) PrepareNodes() error {\n",
"\tnumNodes := 0\n",
"\tfor k := range p.countToStrategy {\n",
"\t\tnumNodes += k\n",
"\t}\n",
"\n",
"\tglog.Infof(\"Making %d nodes\", numNodes)\n",
"\tbaseNode := &api.Node{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tnumNodes += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 48
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
retries = 5
)
type IntegrationTestNodePreparer struct {
client clientset.Interface
countToStrategy map[int]testutils.PrepareNodeStrategy
nodeNamePrefix string
}
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
return &IntegrationTestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeNamePrefix: nodeNamePrefix,
}
}
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes := 0
for k := range p.countToStrategy {
numNodes += k
}
glog.Infof("Making %d nodes", numNodes)
baseNode := &api.Node{
ObjectMeta: api.ObjectMeta{
GenerateName: p.nodeNamePrefix,
},
Spec: api.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
api.ResourceCPU: resource.MustParse("4"),
api.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: api.NodeRunning,
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
},
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
index := 0
sum := 0
for k, strategy := range p.countToStrategy {
sum += k
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
}
}
return nil
}
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
}
| test/integration/framework/perf_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.9983933568000793,
0.4377134144306183,
0.00016899852198548615,
0.011511902324855328,
0.4783693552017212
] |
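Stitching together the CountToStrategy after_edit fragments in this dump, the refactored counting loop in perf_utils.go would presumably read:

numNodes := 0
for _, v := range p.countToStrategy {
	numNodes += v.Count
}

and, for assigning strategies to the ready nodes (the v.Strategy field access is inferred, since the corresponding fragment only shows the loop header):

index := 0
sum := 0
for _, v := range p.countToStrategy {
	sum += v.Count
	for ; index < sum; index++ {
		if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
			glog.Errorf("Aborting node preparation: %v", err)
			return err
		}
	}
}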
{
"id": 8,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (p *IntegrationTestNodePreparer) PrepareNodes() error {\n",
"\tnumNodes := 0\n",
"\tfor k := range p.countToStrategy {\n",
"\t\tnumNodes += k\n",
"\t}\n",
"\n",
"\tglog.Infof(\"Making %d nodes\", numNodes)\n",
"\tbaseNode := &api.Node{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tnumNodes += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 48
} | accept-hosts
accept-paths
admission-control
admission-control-config-file
advertise-address
advertised-address
algorithm-provider
all-namespaces
allocate-node-cidrs
allow-privileged
allowed-not-ready-nodes
anonymous-auth
api-advertise-addresses
api-burst
api-external-dns-names
api-port
api-prefix
api-rate
api-server-port
api-servers
api-token
api-version
apiserver-count
apiserver-count
audit-log-maxage
audit-log-maxbackup
audit-log-maxsize
audit-log-path
auth-path
auth-path
auth-provider
auth-provider
auth-provider-arg
auth-provider-arg
authentication-token-webhook-cache-ttl
authentication-token-webhook-config-file
authorization-mode
authorization-policy-file
authorization-rbac-super-user
authorization-webhook-cache-authorized-ttl
authorization-webhook-cache-unauthorized-ttl
authorization-webhook-config-file
babysit-daemons
basic-auth-file
bench-pods
bench-quiet
bench-tasks
bench-workers
bind-address
bind-pods-burst
bind-pods-qps
bounding-dirs
build-dependencies
build-only
build-tag
cadvisor-port
cert-dir
certificate-authority
cgroup-driver
cgroup-root
cgroups-per-qos
chaos-chance
clean-start
cleanup
cleanup-iptables
client-ca-file
client-certificate
client-key
clientset-api-path
clientset-name
clientset-only
clientset-path
cloud-config
cloud-provider
cluster-cidr
cluster-dns
cluster-domain
cluster-ip
cluster-monitor-period
cluster-name
cluster-signing-cert-file
cluster-signing-key-file
cluster-tag
cni-bin-dir
cni-conf-dir
concurrent-deployment-syncs
concurrent-endpoint-syncs
concurrent-gc-syncs
concurrent-namespace-syncs
concurrent-replicaset-syncs
concurrent-resource-quota-syncs
concurrent-service-syncs
concurrent-serviceaccount-token-syncs
config-sync-period
configure-cloud-routes
conntrack-max
conntrack-max-per-core
conntrack-min
conntrack-tcp-timeout-established
consumer-port
consumer-service-name
consumer-service-namespace
contain-pod-resources
container-port
container-runtime
container-runtime-endpoint
controller-start-interval
cors-allowed-origins
cpu-cfs-quota
cpu-percent
create-external-load-balancer
current-release-pr
current-replicas
daemonset-lookup-cache-size
data-dir
default-container-cpu-limit
default-container-mem-limit
delay-shutdown
delete-collection-workers
delete-instances
delete-local-data
delete-namespace
delete-namespace-on-failure
deleting-pods-burst
deleting-pods-qps
deployment-controller-sync-period
deployment-label-key
deserialization-cache-size
dest-file
disable-filter
disable-kubenet
discovery-port
dns-bind-address
dns-port
dns-provider
dns-provider-config
docker-email
docker-endpoint
docker-exec-handler
docker-password
docker-server
docker-username
dockercfg-path
driver-port
drop-embedded-fields
dry-run
dump-logs-on-failure
duration-sec
e2e-output-dir
e2e-verify-service-account
enable-controller-attach-detach
enable-custom-metrics
enable-debugging-handlers
enable-dynamic-provisioning
enable-garbage-collector
enable-garbage-collector
enable-garbage-collector
enable-hostpath-provisioner
enable-server
enable-swagger-ui
etcd-address
etcd-cafile
etcd-certfile
etcd-config
etcd-keyfile
etcd-mutation-timeout
etcd-prefix
etcd-quorum-read
etcd-server
etcd-servers
etcd-servers-overrides
event-burst
event-qps
event-ttl
eviction-hard
eviction-max-pod-grace-period
eviction-minimum-reclaim
eviction-pressure-transition-period
eviction-soft
eviction-soft-grace-period
executor-bindall
executor-logv
executor-path
executor-suicide-timeout
exit-on-lock-contention
experimental-allowed-unsafe-sysctls
experimental-bootstrap-kubeconfig
experimental-keystone-url
experimental-nvidia-gpus
experimental-prefix
experimental-runtime-integration-type
external-etcd-cafile
external-etcd-certfile
external-etcd-endpoints
external-etcd-keyfile
external-hostname
external-ip
extra-peer-dirs
failover-timeout
failure-domains
fake-clientset
feature-gates
federated-api-burst
federated-api-qps
federated-kube-context
federation-name
file-check-frequency
file-suffix
file_content_in_loop
flex-volume-plugin-dir
forward-services
framework-name
framework-store-uri
framework-weburi
from-file
from-literal
func-dest
fuzz-iters
garbage-collector-enabled
gather-logs-sizes
gather-metrics-at-teardown
gather-resource-usage
gce-project
gce-service-account
gce-zone
ginkgo-flags
gke-cluster
go-header-file
google-json-key
grace-period
ha-domain
hairpin-mode
hard
hard-pod-affinity-symmetric-weight
healthz-bind-address
healthz-port
horizontal-pod-autoscaler-sync-period
host-ipc-sources
host-network-sources
host-pid-sources
host-port-endpoints
hostname-override
http-check-frequency
http-port
ignore-daemonsets
ignore-not-found
image-config-file
image-gc-high-threshold
image-gc-low-threshold
image-project
image-pull-policy
image-service-endpoint
include-extended-apis
include-extended-apis
included-types-overrides
input-base
input-dirs
insecure-allow-any-token
insecure-bind-address
insecure-experimental-approve-all-kubelet-csrs-for-group
insecure-port
insecure-skip-tls-verify
instance-metadata
instance-name-prefix
iptables-drop-bit
iptables-masquerade-bit
iptables-sync-period
ir-data-source
ir-dbname
ir-hawkular
ir-influxdb-host
ir-namespace-only
ir-password
ir-user
jenkins-host
jenkins-jobs
junit-file-number
k8s-bin-dir
k8s-build-output
keep-gogoproto
km-path
kops-cluster
kops-kubernetes-version
kops-nodes
kops-ssh-key
kops-state
kops-zones
kube-api-burst
kube-api-content-type
kube-api-qps
kube-master
kube-master
kube-master-url
kube-reserved
kubecfg-file
kubectl-path
kubelet-address
kubelet-api-servers
kubelet-cadvisor-port
kubelet-certificate-authority
kubelet-cgroups
kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-enable-debugging-handlers
kubelet-host-network-sources
kubelet-https
kubelet-kubeconfig
kubelet-network-plugin
kubelet-pod-infra-container-image
kubelet-port
kubelet-read-only-port
kubelet-root-dir
kubelet-sync-frequency
kubelet-timeout
kubernetes-service-node-port
label-columns
large-cluster-size-threshold
last-release-pr
leader-elect
leader-elect-lease-duration
leader-elect-renew-deadline
leader-elect-retry-period
lease-duration
leave-stdin-open
limit-bytes
load-balancer-ip
lock-file
log-flush-frequency
long-running-request-regexp
low-diskspace-threshold-mb
make-iptables-util-chains
make-symlinks
manifest-path
manifest-url
manifest-url-header
masquerade-all
master-os-distro
master-service-namespace
max-concurrency
max-connection-bytes-per-sec
max-log-age
max-log-backups
max-log-size
max-open-files
max-outgoing-burst
max-outgoing-qps
max-pods
max-requests-inflight
maximum-dead-containers
maximum-dead-containers-per-container
mesos-authentication-principal
mesos-authentication-provider
mesos-authentication-secret-file
mesos-cgroup-prefix
mesos-default-pod-roles
mesos-executor-cpus
mesos-executor-mem
mesos-framework-roles
mesos-generate-task-discovery
mesos-launch-grace-period
mesos-master
mesos-sandbox-overlay
mesos-user
min-pr-number
min-request-timeout
min-resync-period
minimum-container-ttl-duration
minimum-image-ttl-duration
minion-max-log-age
minion-max-log-backups
minion-max-log-size
minion-path-override
mounter-path
namespace-sync-period
network-plugin
network-plugin-dir
network-plugin-mtu
no-headers
no-headers
no-suggestions
no-suggestions
node-cidr-mask-size
node-eviction-rate
node-instance-group
node-ip
node-labels
node-max-log-age
node-max-log-backups
node-max-log-size
node-monitor-grace-period
node-monitor-period
node-name
node-os-distro
node-path-override
node-port
node-startup-grace-period
node-status-update-frequency
node-sync-period
non-masquerade-cidr
num-nodes
oidc-ca-file
oidc-client-id
oidc-groups-claim
oidc-issuer-url
oidc-username-claim
only-idl
oom-score-adj
out-version
outofdisk-transition-frequency
output-base
output-directory
output-file-base
output-package
output-print-type
output-version
path-override
pod-cidr
pod-eviction-timeout
pod-infra-container-image
pod-manifest-path
pod-network-cidr
pod-running
pods-per-core
policy-config-file
poll-interval
portal-net
prepull-images
private-mountns
prom-push-gateway
protect-kernel-defaults
proto-import
proxy-bindall
proxy-kubeconfig
proxy-logv
proxy-mode
proxy-port-range
public-address-override
pv-recycler-increment-timeout-nfs
pv-recycler-maximum-retry
pv-recycler-minimum-timeout-hostpath
pv-recycler-minimum-timeout-nfs
pv-recycler-pod-template-filepath-hostpath
pv-recycler-pod-template-filepath-nfs
pv-recycler-timeout-increment-hostpath
pvclaimbinder-sync-period
read-only-port
really-crash-for-testing
reconcile-cidr
reconcile-cooldown
reconcile-interval
register-node
register-retry-count
register-schedulable
registry-burst
registry-qps
reject-methods
reject-paths
repair-malformed-updates
replicaset-lookup-cache-size
replication-controller-lookup-cache-size
repo-root
report-dir
report-prefix
require-kubeconfig
required-contexts
resolv-conf
resource-container
resource-quota-sync-period
resource-version
results-dir
retry_time
rkt-api-endpoint
rkt-path
rkt-stage1-image
root-ca-file
root-dir
route-reconciliation-period
run-proxy
run-services-mode
runtime-cgroups
runtime-config
runtime-integration-type
runtime-request-timeout
save-config
schedule-pods-here
scheduler-config
scheduler-name
schema-cache-dir
scopes
seccomp-profile-root
secondary-node-eviction-rate
secure-port
serialize-image-pulls
server-start-timeout
service-account-key-file
service-account-lookup
service-account-private-key-file
service-address
service-cidr
service-cluster-ip-range
service-dns-domain
service-generator
service-node-port-range
service-node-ports
service-overrides
service-sync-period
session-affinity
setup-node
show-all
show-events
show-kind
show-labels
shutdown-fd
shutdown-fifo
since-seconds
since-time
skip-generated-rewrite
skip-munges
skip-preflight-checks
sort-by
source-file
ssh-env
ssh-keyfile
ssh-options
ssh-user
start-services
static-pods-config
stats-port
stop-services
storage-backend
storage-media-type
storage-version
storage-versions
streaming-connection-idle-timeout
suicide-timeout
sync-frequency
system-cgroups
system-container
system-pods-startup-timeout
system-reserved
target-port
target-ram-mb
tcp-services
terminated-pod-gc-threshold
test-flags
test-timeout
tls-ca-file
tls-cert-file
tls-private-key-file
to-version
token-auth-file
ttl-keys-prefix
ttl-secs
type-src
udp-port
udp-timeout
unhealthy-zone-threshold
unix-socket
update-period
upgrade-image
upgrade-target
use-kubernetes-cluster-service
use-kubernetes-version
user-whitelist
verify-only
viper-config
volume-dir
volume-plugin-dir
volume-stats-agg-period
watch-cache
watch-cache-sizes
watch-only
whitelist-override-label
windows-line-endings
www-prefix
zone-name
garbage-collector-enabled
viper-config
log-lines-total
run-duration
| hack/verify-flags/known-flags.txt | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0001791469840100035,
0.0001691380311967805,
0.0001621679257368669,
0.0001698485721135512,
0.0000030909855013305787
] |
{
"id": 8,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (p *IntegrationTestNodePreparer) PrepareNodes() error {\n",
"\tnumNodes := 0\n",
"\tfor k := range p.countToStrategy {\n",
"\t\tnumNodes += k\n",
"\t}\n",
"\n",
"\tglog.Infof(\"Making %d nodes\", numNodes)\n",
"\tbaseNode := &api.Node{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tnumNodes += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 48
} | {% if grains.cloud is defined and grains.cloud == 'gce' -%}
# On GCE, there is no Salt mine. We run standalone.
{% else %}
# Allow everyone to see cached values of who sits at what IP
{% set networkInterfaceName = "eth0" %}
{% if grains.networkInterfaceName is defined %}
{% set networkInterfaceName = grains.networkInterfaceName %}
{% endif %}
mine_functions:
network.ip_addrs: [{{networkInterfaceName}}]
grains.items: []
{% endif -%}
| cluster/saltbase/pillar/mine.sls | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017563083383720368,
0.00017214330728165805,
0.00016865579527802765,
0.00017214330728165805,
0.0000034875190522143384
] |
{
"id": 8,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (p *IntegrationTestNodePreparer) PrepareNodes() error {\n",
"\tnumNodes := 0\n",
"\tfor k := range p.countToStrategy {\n",
"\t\tnumNodes += k\n",
"\t}\n",
"\n",
"\tglog.Infof(\"Making %d nodes\", numNodes)\n",
"\tbaseNode := &api.Node{\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tnumNodes += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 48
} | 
A FileSystem Abstraction System for Go
[](https://travis-ci.org/spf13/afero) [](https://ci.appveyor.com/project/spf13/afero) [](https://godoc.org/github.com/spf13/afero) [](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Overview
Afero is a filesystem framework providing a simple, uniform and universal API
for interacting with any filesystem. It is an abstraction layer providing
interfaces, types and methods. Afero has an exceptionally clean interface and
simple design without needless constructors or initialization methods.
Afero is also a library providing a base set of interoperable backend
filesystems that make it easy to work with afero while retaining all the power
and benefit of the os and ioutil packages.
Afero provides significant improvements over using the os package alone, most
notably the ability to create mock and testing filesystems without relying on the disk.
It is suitable for use in any situation where you would consider using the OS
package, as it provides an additional abstraction that makes it easy to use a
memory-backed file system during testing. It also adds support for the http
filesystem for full interoperability.
## Afero Features
* A single consistent API for accessing a variety of filesystems
* Interoperation between a variety of file system types
* A set of interfaces to encourage and enforce interoperability between backends
* An atomic cross platform memory backed file system
* Support for compositional (union) file systems by combining multiple file systems acting as one
* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
* A set of utility functions ported from io, ioutil & hugo to be afero aware
# Using Afero
Afero is easy to use and easier to adopt.
A few different ways you could use Afero:
* Use the interfaces alone to define your own file system.
* Wrap the OS packages.
* Define different filesystems for different parts of your application.
* Use Afero for mock filesystems while testing
## Step 1: Install Afero
First use go get to install the latest version of the library.
$ go get github.com/spf13/afero
Next include Afero in your application.
```go
import "github.com/spf13/afero"
```
## Step 2: Declare a backend
First define a package variable and set it to a pointer to a filesystem.
```go
var AppFs afero.Fs = afero.NewMemMapFs()
// or
var AppFs afero.Fs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
OsFs it will still use the same underlying filesystem but will reduce
the ability to drop in other filesystems as desired.
## Step 3: Use it like you would the OS package
Throughout your application use any function and method like you normally
would.
So if my application before had:
```go
os.Open("/tmp/foo")
```
We would replace it with a call to `AppFs.Open("/tmp/foo")`.
`AppFs` being the variable we defined above.
## List of all available functions
File System Methods Available:
```go
Chmod(name string, mode os.FileMode) : error
Chtimes(name string, atime time.Time, mtime time.Time) : error
Create(name string) : File, error
Mkdir(name string, perm os.FileMode) : error
MkdirAll(path string, perm os.FileMode) : error
Name() : string
Open(name string) : File, error
OpenFile(name string, flag int, perm os.FileMode) : File, error
Remove(name string) : error
RemoveAll(path string) : error
Rename(oldname, newname string) : error
Stat(name string) : os.FileInfo, error
```
File Interfaces and Methods Available:
```go
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() : string
Readdir(count int) : []os.FileInfo, error
Readdirnames(n int) : []string, error
Stat() : os.FileInfo, error
Sync() : error
Truncate(size int64) : error
WriteString(s string) : ret int, err error
```
In some applications it may make sense to define a new package that
simply exports the file system variable for easy access from anywhere.
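For instance, a tiny package along these lines (the package name `fsys` is illustrative) lets every other package share one `Fs` value:

```go
// Package fsys exposes the application's filesystem so tests can
// swap in a memory-backed implementation.
package fsys

import "github.com/spf13/afero"

// FS is used throughout the application. Production code keeps the
// OS-backed default; a test may assign afero.NewMemMapFs() instead.
var FS afero.Fs = afero.NewOsFs()
```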
## Using Afero's utility functions
Afero provides a set of functions to make it easier to use the underlying file systems.
These functions have been primarily ported from io & ioutil with some developed for Hugo.
The afero utilities support all afero compatible backends.
The list of utilities includes:
```go
DirExists(path string) (bool, error)
Exists(path string) (bool, error)
FileContainsBytes(filename string, subslice []byte) (bool, error)
GetTempDir(subPath string) string
IsDir(path string) (bool, error)
IsEmpty(path string) (bool, error)
ReadDir(dirname string) ([]os.FileInfo, error)
ReadFile(filename string) ([]byte, error)
SafeWriteReader(path string, r io.Reader) (err error)
TempDir(dir, prefix string) (name string, err error)
TempFile(dir, prefix string) (f File, err error)
Walk(root string, walkFn filepath.WalkFunc) error
WriteFile(filename string, data []byte, perm os.FileMode) error
WriteReader(path string, r io.Reader) (err error)
```
For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
They can be used in two different ways. You can either call them directly,
where the first parameter of each function is the file system, or you can
declare a new `Afero`, a custom type used to bind these functions as methods
to a given filesystem.
### Calling utilities directly
```go
fs := new(afero.MemMapFs)
f, err := afero.TempFile(fs,"", "ioutil-test")
```
### Calling via Afero
```go
fs := afero.NewMemMapFs()
afs := &Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
## Using Afero for Testing
There is a large benefit to using a mock filesystem for testing. It has a
completely blank state every time it is initialized and can be easily
reproducible regardless of OS. You could create files to your heart’s content
and the file access would be fast while also saving you from all the annoying
issues with deleting temporary files, Windows file locking, etc. The MemMapFs
backend is perfect for testing.
* Much faster than performing I/O operations on disk
* Avoid security issues and permissions
* Far more control. 'rm -rf /' with confidence
* Test setup is far easier to do
* No test cleanup needed
One way to accomplish this is to define a variable as mentioned above.
In your application this will be set to afero.NewOsFs(); during testing you
can set it to afero.NewMemMapFs().
It wouldn't be uncommon to have each test initialize a blank slate memory
backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
appropriate in my application code. This approach ensures that Tests are order
independent, with no test relying on the state left by an earlier test.
Then in my tests I would initialize a new MemMapFs for each test:
```go
func TestExist(t *testing.T) {
appFS = afero.NewMemMapFs()
// create test files and directories
appFS.MkdirAll("src/a", 0755)
afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
_, err := appFS.Stat("src/c")
if os.IsNotExist(err) {
t.Errorf("file \"%s\" does not exist.\n", name)
}
}
```
# Available Backends
## Operating System Native
### OsFs
The first is simply a wrapper around the native OS calls. This makes it
very easy to use as all of the calls are the same as the existing OS
calls. It also makes it trivial to have your code use the OS during
operation and a mock filesystem during testing or as needed.
```go
appfs := afero.NewOsFs()
appfs.MkdirAll("src/a", 0755)
```
## Memory Backed Storage
### MemMapFs
Afero also provides a fully atomic memory-backed filesystem, perfect for use
in mocking and for avoiding unnecessary disk I/O when persistence isn’t
necessary. It is fully concurrent and will work safely within goroutines.
```go
mm := afero.NewMemMapFs()
mm.MkdirAll("src/a", 0755)
```
#### InMemoryFile
As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
backed file implementation. This can be used in other memory backed file
systems with ease. Plans are to add a radix tree memory stored file
system using InMemoryFile.
## Network Interfaces
### SftpFs
Afero has experimental support for the secure file transfer protocol (SFTP),
which can be used to perform file operations over an encrypted channel.
## Filtering Backends
### BasePathFs
The BasePathFs restricts all operations to a given path within an Fs.
The given file name to the operations on this Fs will be prepended with
the base path before calling the source Fs.
```go
bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
```
### ReadOnlyFs
A thin wrapper around the source Fs providing a read only view.
```go
fs := afero.NewReadOnlyFs(afero.NewOsFs())
_, err := fs.Create("/file.txt")
// err = syscall.EPERM
```
### RegexpFs
A filtered view on file names, any file NOT matching
the passed regexp will be treated as non-existing.
Files not matching the regexp provided will not be created.
Directories are not filtered.
```go
fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
// err = syscall.ENOENT
```
### HttpFs
Afero provides an http compatible backend which can wrap any of the existing
backends.
The Http package requires a slightly specific version of Open which
returns an http.File type.
Afero provides an httpFs file system which satisfies this requirement.
Any Afero FileSystem can be used as an httpFs.
```go
httpFs := afero.NewHttpFs(<ExistingFS>)
fileserver := http.FileServer(httpFs.Dir(<PATH>))
http.Handle("/", fileserver)
```
## Composite Backends
Afero provides the ability to have two filesystems (or more) act as a single
file system.
### CacheOnReadFs
The CacheOnReadFs will lazily make copies of any accessed files from the base
layer into the overlay. Subsequent reads will be pulled from the overlay
directly, provided the request is within the cache duration of when the copy
was created in the overlay.
If the base filesystem is writeable, any changes to files will be done first
to the base, then to the overlay layer. Write calls to open file handles like
`Write()` or `Truncate()` are done to the overlay first.
To write files to the overlay only, you can use the overlay Fs directly (not
via the union Fs).
Files are cached in the layer for the given time.Duration; a cache duration of
0 means "forever", meaning the file will never be re-requested from the base.
A read-only base will make the overlay also read-only but still copy files
from the base to the overlay when they're not present (or outdated) in the
caching layer.
```go
base := afero.NewOsFs()
layer := afero.NewMemMapFs()
ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
```
### CopyOnWriteFs()
The CopyOnWriteFs is a read only base file system with a potentially
writeable layer on top.
Read operations will first look in the overlay and if not found there, will
serve the file from the base.
Changes to the file system will only be made in the overlay.
Any attempt to modify a file found only in the base will copy the file to the
overlay layer before modification (including opening a file with a writable
handle).
Removing and Renaming files present only in the base layer is not currently
permitted. If a file is present in the base layer and the overlay, only the
overlay will be removed/renamed.
```go
base := afero.NewOsFs()
roBase := afero.NewReadOnlyFs(base)
ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
fh, _ = ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
```
In this example all write operations will only occur in memory (MemMapFs)
leaving the base filesystem (OsFs) untouched.
## Desired/possible backends
The following is a short list of possible backends we hope someone will
implement:
* SSH
* ZIP
* TAR
* S3
# About the project
## What's in the name
Afero comes from the latin roots Ad-Facere.
**"Ad"** is a prefix meaning "to".
**"Facere"** is a form of the root "faciō" making "make or do".
The literal meaning of afero is "to make" or "to do" which seems very fitting
for a library that allows one to make files and directories and do things with them.
The English word that shares the same roots as Afero is "affair". Affair shares
the same concept but as a noun it means "something that is made or done" or "an
object of a particular type".
It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
Googles very well.
## Release Notes
* **0.10.0** 2015.12.10
* Full compatibility with Windows
* Introduction of afero utilities
* Test suite rewritten to work cross platform
* Normalize paths for MemMapFs
* Adding Sync to the file interface
* **Breaking Change** Walk and ReadDir have changed parameter order
* Moving types used by MemMapFs to a subpackage
* General bugfixes and improvements
* **0.9.0** 2015.11.05
* New Walk function similar to filepath.Walk
* MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
* MemMapFs.Remove now really deletes the file
* InMemoryFile.Readdir and Readdirnames work correctly
* InMemoryFile functions lock it for concurrent access
* Test suite improvements
* **0.8.0** 2014.10.28
* First public version
* Interfaces feel ready for people to build using
* Interfaces satisfy all known uses
* MemMapFs passes the majority of the OS test suite
* OsFs passes the majority of the OS test suite
## Contributing
1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request
## Contributors
Names in no particular order:
* [spf13](https://github.com/spf13)
* [jaqx0r](https://github.com/jaqx0r)
* [mbertschler](https://github.com/mbertschler)
* [xor-gate](https://github.com/xor-gate)
## License
Afero is released under the Apache 2.0 license. See
[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
| vendor/github.com/spf13/afero/README.md | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0006831738282926381,
0.00018691124569159,
0.000159981005708687,
0.0001710121432552114,
0.00008017048094188794
] |
{
"id": 9,
"code_window": [
"\n",
"\tnodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)\n",
"\tindex := 0\n",
"\tsum := 0\n",
"\tfor k, strategy := range p.countToStrategy {\n",
"\t\tsum += k\n",
"\t\tfor ; index < sum; index++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tsum += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 82
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"fmt"
"math"
"os"
"time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
"github.com/golang/glog"
)
const (
// String used to mark pod deletion
nonExist = "NonExist"
)
type RCConfig struct {
Client *client.Client
Image string
Command []string
Name string
Namespace string
PollInterval time.Duration
Timeout time.Duration
PodStatusFile *os.File
Replicas int
CpuRequest int64 // millicores
CpuLimit int64 // millicores
MemRequest int64 // bytes
MemLimit int64 // bytes
ReadinessProbe *api.Probe
DNSPolicy *api.DNSPolicy
// Env vars, set the same for every pod.
Env map[string]string
// Extra labels added to every pod.
Labels map[string]string
// Node selector for pods in the RC.
NodeSelector map[string]string
// Ports to declare in the container (map of name to containerPort).
Ports map[string]int
// Ports to declare in the container as host and container ports.
HostPorts map[string]int
Volumes []api.Volume
VolumeMounts []api.VolumeMount
// Pointer to a list of pods; if non-nil, will be set to a list of pods
// created by this RC by RunRC.
CreatedPods *[]*api.Pod
// Maximum allowable container failures. If exceeded, RunRC returns an error.
// Defaults to replicas*0.1 if unspecified.
MaxContainerFailures *int
// If set to false starting RC will print progress, otherwise only errors will be printed.
Silent bool
// If set this function will be used to print log lines instead of glog.
LogFunc func(fmt string, args ...interface{})
// If set those functions will be used to gather data from Nodes - in integration tests where no
// kubelets are running those variables should be nil.
NodeDumpFunc func(c *client.Client, nodeNames []string, logFunc func(fmt string, args ...interface{}))
ContainerDumpFunc func(c *client.Client, ns string, logFunc func(ftm string, args ...interface{}))
}
func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
if rc.LogFunc != nil {
rc.LogFunc(fmt, args...)
}
glog.Infof(fmt, args...)
}
type DeploymentConfig struct {
RCConfig
}
type ReplicaSetConfig struct {
RCConfig
}
// podInfo contains pod information useful for debugging e2e tests.
type podInfo struct {
oldHostname string
oldPhase string
hostname string
phase string
}
// PodDiff is a map of pod name to podInfos
type PodDiff map[string]*podInfo
// String formats the given PodDiff, skipping pods whose phase is in ignorePhases.
func (p PodDiff) String(ignorePhases sets.String) string {
ret := ""
for name, info := range p {
if ignorePhases.Has(info.phase) {
continue
}
if info.phase == nonExist {
ret += fmt.Sprintf("Pod %v was deleted, had phase %v and host %v\n", name, info.oldPhase, info.oldHostname)
continue
}
phaseChange, hostChange := false, false
msg := fmt.Sprintf("Pod %v ", name)
if info.oldPhase != info.phase {
phaseChange = true
if info.oldPhase == nonExist {
msg += fmt.Sprintf("in phase %v ", info.phase)
} else {
msg += fmt.Sprintf("went from phase: %v -> %v ", info.oldPhase, info.phase)
}
}
if info.oldHostname != info.hostname {
hostChange = true
if info.oldHostname == nonExist || info.oldHostname == "" {
msg += fmt.Sprintf("assigned host %v ", info.hostname)
} else {
msg += fmt.Sprintf("went from host: %v -> %v ", info.oldHostname, info.hostname)
}
}
if phaseChange || hostChange {
ret += msg + "\n"
}
}
return ret
}
// Diff computes a PodDiff given 2 lists of pods.
func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
podInfoMap := PodDiff{}
// New pods will show up in the curPods list but not in oldPods. They have oldhostname/phase == nonexist.
for _, pod := range curPods {
podInfoMap[pod.Name] = &podInfo{hostname: pod.Spec.NodeName, phase: string(pod.Status.Phase), oldHostname: nonExist, oldPhase: nonExist}
}
// Deleted pods will show up in the oldPods list but not in curPods. They have a hostname/phase == nonexist.
for _, pod := range oldPods {
if info, ok := podInfoMap[pod.Name]; ok {
info.oldHostname, info.oldPhase = pod.Spec.NodeName, string(pod.Status.Phase)
} else {
podInfoMap[pod.Name] = &podInfo{hostname: nonExist, phase: nonExist, oldHostname: pod.Spec.NodeName, oldPhase: string(pod.Status.Phase)}
}
}
return podInfoMap
}
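// Example (illustrative, not part of the original file): log only pods whose
// phase or host changed, ignoring pods that merely stayed Pending:
//
//	diff := Diff(oldPods, curPods)
//	glog.Info(diff.String(sets.NewString(string(api.PodPending))))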
// RunDeployment launches (and verifies the correctness of) a Deployment
// and waits for all the pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunDeployment(config DeploymentConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *DeploymentConfig) create() error {
deployment := &extensions.Deployment{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.DeploymentSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&deployment.Spec.Template)
_, err := config.Client.Deployments(config.Namespace).Create(deployment)
if err != nil {
return fmt.Errorf("Error creating deployment: %v", err)
}
config.RCConfigLog("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
return nil
}
// RunReplicaSet launches (and verifies the correctness of) a ReplicaSet
// and waits until all the pods it launches reach the "Running" state.
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunReplicaSet(config ReplicaSetConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
func (config *ReplicaSetConfig) create() error {
rs := &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(config.Replicas),
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{
"name": config.Name,
},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
},
},
},
},
},
}
config.applyTo(&rs.Spec.Template)
_, err := config.Client.ReplicaSets(config.Namespace).Create(rs)
if err != nil {
return fmt.Errorf("Error creating replica set: %v", err)
}
config.RCConfigLog("Created replica set with name: %v, namespace: %v, replica count: %v", rs.Name, config.Namespace, rs.Spec.Replicas)
return nil
}
// RunRC launches (and verifies the correctness of) a Replication Controller
// and waits for all the pods it spawns to become "Running".
// It's the caller's responsibility to clean up externally (i.e. use the
// namespace lifecycle for handling Cleanup).
func RunRC(config RCConfig) error {
err := config.create()
if err != nil {
return err
}
return config.start()
}
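// Example (illustrative; name, namespace and image are hypothetical): start a
// replication controller with five pause replicas and wait for them to run:
//
//	err := RunRC(RCConfig{
//		Client:    c,
//		Name:      "example-rc",
//		Namespace: "default",
//		Image:     "gcr.io/google_containers/pause:2.0",
//		Replicas:  5,
//		Timeout:   5 * time.Minute,
//	})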
func (config *RCConfig) create() error {
dnsDefault := api.DNSDefault
if config.DNSPolicy == nil {
config.DNSPolicy = &dnsDefault
}
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: config.Name,
},
Spec: api.ReplicationControllerSpec{
Replicas: int32(config.Replicas),
Selector: map[string]string{
"name": config.Name,
},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": config.Name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: config.Name,
Image: config.Image,
Command: config.Command,
Ports: []api.ContainerPort{{ContainerPort: 80}},
ReadinessProbe: config.ReadinessProbe,
},
},
DNSPolicy: *config.DNSPolicy,
NodeSelector: config.NodeSelector,
},
},
},
}
config.applyTo(rc.Spec.Template)
_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
if err != nil {
return fmt.Errorf("Error creating replication controller: %v", err)
}
config.RCConfigLog("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
return nil
}
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
if config.Env != nil {
for k, v := range config.Env {
c := &template.Spec.Containers[0]
c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
}
}
if config.Labels != nil {
for k, v := range config.Labels {
template.ObjectMeta.Labels[k] = v
}
}
if config.NodeSelector != nil {
template.Spec.NodeSelector = make(map[string]string)
for k, v := range config.NodeSelector {
template.Spec.NodeSelector[k] = v
}
}
if config.Ports != nil {
for k, v := range config.Ports {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
}
}
if config.HostPorts != nil {
for k, v := range config.HostPorts {
c := &template.Spec.Containers[0]
c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
}
}
if config.CpuLimit > 0 || config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
}
if config.CpuLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
}
if config.MemLimit > 0 {
template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
if config.CpuRequest > 0 || config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
}
if config.CpuRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
}
if config.MemRequest > 0 {
template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
}
if len(config.Volumes) > 0 {
template.Spec.Volumes = config.Volumes
}
if len(config.VolumeMounts) > 0 {
template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
}
}
type RCStartupStatus struct {
Expected int
Terminating int
Running int
RunningButNotReady int
Waiting int
Pending int
Unknown int
Inactive int
FailedContainers int
Created []*api.Pod
ContainerRestartNodes sets.String
}
func (s *RCStartupStatus) String(name string) string {
return fmt.Sprintf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
name, len(s.Created), s.Expected, s.Running, s.Pending, s.Waiting, s.Inactive, s.Terminating, s.Unknown, s.RunningButNotReady)
}
func ComputeRCStartupStatus(pods []*api.Pod, expected int) RCStartupStatus {
startupStatus := RCStartupStatus{
Expected: expected,
Created: make([]*api.Pod, 0, expected),
ContainerRestartNodes: sets.NewString(),
}
for _, p := range pods {
if p.DeletionTimestamp != nil {
startupStatus.Terminating++
continue
}
startupStatus.Created = append(startupStatus.Created, p)
if p.Status.Phase == api.PodRunning {
ready := false
for _, c := range p.Status.Conditions {
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
ready = true
break
}
}
if ready {
				// Only count a pod as running when it is also ready.
startupStatus.Running++
} else {
startupStatus.RunningButNotReady++
}
for _, v := range FailedContainers(p) {
startupStatus.FailedContainers = startupStatus.FailedContainers + v.Restarts
startupStatus.ContainerRestartNodes.Insert(p.Spec.NodeName)
}
} else if p.Status.Phase == api.PodPending {
if p.Spec.NodeName == "" {
startupStatus.Waiting++
} else {
startupStatus.Pending++
}
} else if p.Status.Phase == api.PodSucceeded || p.Status.Phase == api.PodFailed {
startupStatus.Inactive++
} else if p.Status.Phase == api.PodUnknown {
startupStatus.Unknown++
}
}
return startupStatus
}
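// Example (illustrative, variable names are hypothetical): summarize pod
// startup progress from a pod store snapshot:
//
//	status := ComputeRCStartupStatus(podStore.List(), config.Replicas)
//	glog.Info(status.String(config.Name))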
func (config *RCConfig) start() error {
// Don't force tests to fail if they don't care about containers restarting.
var maxContainerFailures int
if config.MaxContainerFailures == nil {
maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
} else {
maxContainerFailures = *config.MaxContainerFailures
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
PodStore := NewPodStore(config.Client, config.Namespace, label, fields.Everything())
defer PodStore.Stop()
interval := config.PollInterval
if interval <= 0 {
interval = 10 * time.Second
}
timeout := config.Timeout
if timeout <= 0 {
timeout = 5 * time.Minute
}
oldPods := make([]*api.Pod, 0)
oldRunning := 0
lastChange := time.Now()
for oldRunning != config.Replicas {
time.Sleep(interval)
pods := PodStore.List()
startupStatus := ComputeRCStartupStatus(pods, config.Replicas)
pods = startupStatus.Created
if config.CreatedPods != nil {
*config.CreatedPods = pods
}
if !config.Silent {
config.RCConfigLog(startupStatus.String(config.Name))
}
if config.PodStatusFile != nil {
fmt.Fprintf(config.PodStatusFile, "%d, running, %d, pending, %d, waiting, %d, inactive, %d, unknown, %d, runningButNotReady\n", startupStatus.Running, startupStatus.Pending, startupStatus.Waiting, startupStatus.Inactive, startupStatus.Unknown, startupStatus.RunningButNotReady)
}
if startupStatus.FailedContainers > maxContainerFailures {
if config.NodeDumpFunc != nil {
config.NodeDumpFunc(config.Client, startupStatus.ContainerRestartNodes.List(), config.RCConfigLog)
}
if config.ContainerDumpFunc != nil {
// Get the logs from the failed containers to help diagnose what caused them to fail
config.ContainerDumpFunc(config.Client, config.Namespace, config.RCConfigLog)
}
return fmt.Errorf("%d containers failed which is more than allowed %d", startupStatus.FailedContainers, maxContainerFailures)
}
if len(pods) < len(oldPods) || len(pods) > config.Replicas {
// This failure mode includes:
// kubelet is dead, so node controller deleted pods and rc creates more
// - diagnose by noting the pod diff below.
// pod is unhealthy, so replication controller creates another to take its place
// - diagnose by comparing the previous "2 Pod states" lines for inactive pods
errorStr := fmt.Sprintf("Number of reported pods for %s changed: %d vs %d", config.Name, len(pods), len(oldPods))
config.RCConfigLog("%v, pods that changed since the last iteration:", errorStr)
config.RCConfigLog(Diff(oldPods, pods).String(sets.NewString()))
			return fmt.Errorf("%s", errorStr)
}
if len(pods) > len(oldPods) || startupStatus.Running > oldRunning {
lastChange = time.Now()
}
oldPods = pods
oldRunning = startupStatus.Running
if time.Since(lastChange) > timeout {
break
}
}
if oldRunning != config.Replicas {
// List only pods from a given replication controller.
options := api.ListOptions{LabelSelector: label}
if pods, err := config.Client.Pods(api.NamespaceAll).List(options); err == nil {
for _, pod := range pods.Items {
config.RCConfigLog("Pod %s\t%s\t%s\t%s", pod.Name, pod.Spec.NodeName, pod.Status.Phase, pod.DeletionTimestamp)
}
} else {
config.RCConfigLog("Can't list pod debug info: %v", err)
}
return fmt.Errorf("Only %d pods started out of %d", oldRunning, config.Replicas)
}
return nil
}
// Simplified version of RunRC, that does not create RC, but creates plain Pods.
// Optionally waits for pods to start running (if waitForRunning == true).
// The number of replicas must be non-zero.
func StartPods(c *client.Client, replicas int, namespace string, podNamePrefix string,
pod api.Pod, waitForRunning bool, logFunc func(fmt string, args ...interface{})) error {
	// Refuse a non-positive replica count; there would be no pods to start.
if replicas < 1 {
panic("StartPods: number of replicas must be non-zero")
}
startPodsID := string(uuid.NewUUID()) // So that we can label and find them
for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Labels["name"] = podName
pod.ObjectMeta.Labels["startPodsID"] = startPodsID
pod.Spec.Containers[0].Name = podName
_, err := c.Pods(namespace).Create(&pod)
if err != nil {
return err
}
}
logFunc("Waiting for running...")
if waitForRunning {
label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID}))
err := WaitForPodsWithLabelRunning(c, namespace, label)
if err != nil {
return fmt.Errorf("Error waiting for %d pods to be running - probably a timeout: %v", replicas, err)
}
}
return nil
}
// Wait up to 10 minutes for all matching pods to become Running, and for at
// least one matching pod to exist.
func WaitForPodsWithLabelRunning(c *client.Client, ns string, label labels.Selector) error {
running := false
PodStore := NewPodStore(c, ns, label, fields.Everything())
defer PodStore.Stop()
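	// Restart the scan whenever no matching pods exist yet or any pod is not yet Running.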
waitLoop:
for start := time.Now(); time.Since(start) < 10*time.Minute; time.Sleep(5 * time.Second) {
pods := PodStore.List()
if len(pods) == 0 {
continue waitLoop
}
for _, p := range pods {
if p.Status.Phase != api.PodRunning {
continue waitLoop
}
}
running = true
break
}
if !running {
return fmt.Errorf("Timeout while waiting for pods with labels %q to be running", label.String())
}
return nil
}
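// TestNodePreparer prepares a set of nodes before a test run and restores them afterwards.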
type TestNodePreparer interface {
PrepareNodes() error
CleanupNodes() error
}
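// PrepareNodeStrategy supplies the patch applied to each node during
// preparation and knows how to undo it during cleanup.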
type PrepareNodeStrategy interface {
PreparePatch(node *api.Node) []byte
CleanupNode(node *api.Node) *api.Node
}
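// TrivialNodePrepareStrategy is a no-op strategy: it patches nothing and
// returns nodes unchanged on cleanup.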
type TrivialNodePrepareStrategy struct{}
func (*TrivialNodePrepareStrategy) PreparePatch(*api.Node) []byte {
return []byte{}
}
func (*TrivialNodePrepareStrategy) CleanupNode(node *api.Node) *api.Node {
nodeCopy := *node
return &nodeCopy
}
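// DoPrepareNode applies the strategy's patch to the node, retrying on conflicts.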
func DoPrepareNode(client clientset.Interface, node *api.Node, strategy PrepareNodeStrategy) error {
var err error
patch := strategy.PreparePatch(node)
if len(patch) == 0 {
return nil
}
for attempt := 0; attempt < retries; attempt++ {
		if _, err = client.Core().Nodes().Patch(node.Name, api.MergePatchType, patch); err == nil {
return nil
}
if !apierrs.IsConflict(err) {
return fmt.Errorf("Error while applying patch %v to Node %v: %v", string(patch), node.Name, err)
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("To many conflicts when applying patch %v to Node %v", string(patch), node.Name)
}
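// DoCleanupNode reverts the strategy's changes on the named node, retrying on conflicts.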
func DoCleanupNode(client clientset.Interface, nodeName string, strategy PrepareNodeStrategy) error {
for attempt := 0; attempt < retries; attempt++ {
node, err := client.Core().Nodes().Get(nodeName)
if err != nil {
return fmt.Errorf("Skipping cleanup of Node: failed to get Node %v: %v", nodeName, err)
}
updatedNode := strategy.CleanupNode(node)
if api.Semantic.DeepEqual(node, updatedNode) {
return nil
}
if _, err = client.Core().Nodes().Update(updatedNode); err == nil {
return nil
}
if !apierrs.IsConflict(err) {
return fmt.Errorf("Error when updating Node %v: %v", nodeName, err)
}
time.Sleep(100 * time.Millisecond)
}
return fmt.Errorf("To many conflicts when trying to cleanup Node %v", nodeName)
}
| test/utils/runners.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.9976626634597778,
0.0285703856498003,
0.0001632189378142357,
0.0001726657064864412,
0.1609857678413391
] |
{
"id": 9,
"code_window": [
"\n",
"\tnodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)\n",
"\tindex := 0\n",
"\tsum := 0\n",
"\tfor k, strategy := range p.countToStrategy {\n",
"\t\tsum += k\n",
"\t\tfor ; index < sum; index++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tsum += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 82
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package subjectaccessreview
import (
"fmt"
kapi "k8s.io/kubernetes/pkg/api"
kapierrors "k8s.io/kubernetes/pkg/api/errors"
authorizationapi "k8s.io/kubernetes/pkg/apis/authorization"
authorizationvalidation "k8s.io/kubernetes/pkg/apis/authorization/validation"
"k8s.io/kubernetes/pkg/auth/authorizer"
authorizationutil "k8s.io/kubernetes/pkg/registry/authorization/util"
"k8s.io/kubernetes/pkg/runtime"
)
type REST struct {
authorizer authorizer.Authorizer
}
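// NewREST returns a REST handler backed by the given authorizer.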
func NewREST(authorizer authorizer.Authorizer) *REST {
return &REST{authorizer}
}
func (r *REST) New() runtime.Object {
return &authorizationapi.SubjectAccessReview{}
}
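// Create validates the incoming SubjectAccessReview, evaluates it against the
// authorizer, and returns the review with its Status populated; nothing is
// persisted.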
func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (runtime.Object, error) {
subjectAccessReview, ok := obj.(*authorizationapi.SubjectAccessReview)
if !ok {
return nil, kapierrors.NewBadRequest(fmt.Sprintf("not a SubjectAccessReview: %#v", obj))
}
if errs := authorizationvalidation.ValidateSubjectAccessReview(subjectAccessReview); len(errs) > 0 {
return nil, kapierrors.NewInvalid(authorizationapi.Kind(subjectAccessReview.Kind), "", errs)
}
authorizationAttributes := authorizationutil.AuthorizationAttributesFrom(subjectAccessReview.Spec)
allowed, reason, evaluationErr := r.authorizer.Authorize(authorizationAttributes)
subjectAccessReview.Status = authorizationapi.SubjectAccessReviewStatus{
Allowed: allowed,
Reason: reason,
}
if evaluationErr != nil {
subjectAccessReview.Status.EvaluationError = evaluationErr.Error()
}
return subjectAccessReview, nil
}
| pkg/registry/authorization/subjectaccessreview/rest.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017927703447639942,
0.00017523257702123374,
0.00016983742534648627,
0.00017734493303578347,
0.0000036559640648192726
] |
{
"id": 9,
"code_window": [
"\n",
"\tnodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)\n",
"\tindex := 0\n",
"\tsum := 0\n",
"\tfor k, strategy := range p.countToStrategy {\n",
"\t\tsum += k\n",
"\t\tfor ; index < sum; index++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tsum += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 82
} | package afero
import (
"os"
"regexp"
"syscall"
"time"
)
// The RegexpFs filters files (not directories) by regular expression. Only
// files matching the given regexp will be allowed, all others get a ENOENT error (
// "No such file or directory").
//
type RegexpFs struct {
re *regexp.Regexp
source Fs
}
func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
return &RegexpFs{source: source, re: re}
}
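// RegexpFile wraps a File and filters its directory listings through the regexp.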
type RegexpFile struct {
f File
re *regexp.Regexp
}
func (r *RegexpFs) matchesName(name string) error {
if r.re == nil {
return nil
}
if r.re.MatchString(name) {
return nil
}
return syscall.ENOENT
}
func (r *RegexpFs) dirOrMatches(name string) error {
dir, err := IsDir(r.source, name)
if err != nil {
return err
}
if dir {
return nil
}
return r.matchesName(name)
}
func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chtimes(name, a, m)
}
func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chmod(name, mode)
}
func (r *RegexpFs) Name() string {
return "RegexpFs"
}
func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.Stat(name)
}
func (r *RegexpFs) Rename(oldname, newname string) error {
dir, err := IsDir(r.source, oldname)
if err != nil {
return err
}
if dir {
return nil
}
if err := r.matchesName(oldname); err != nil {
return err
}
if err := r.matchesName(newname); err != nil {
return err
}
return r.source.Rename(oldname, newname)
}
func (r *RegexpFs) RemoveAll(p string) error {
dir, err := IsDir(r.source, p)
if err != nil {
return err
}
if !dir {
if err := r.matchesName(p); err != nil {
return err
}
}
return r.source.RemoveAll(p)
}
func (r *RegexpFs) Remove(name string) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Remove(name)
}
func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.OpenFile(name, flag, perm)
}
func (r *RegexpFs) Open(name string) (File, error) {
dir, err := IsDir(r.source, name)
if err != nil {
return nil, err
}
if !dir {
if err := r.matchesName(name); err != nil {
return nil, err
}
}
	f, err := r.source.Open(name)
	if err != nil {
		return nil, err
	}
	return &RegexpFile{f: f, re: r.re}, nil
}
func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
return r.source.Mkdir(n, p)
}
func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
return r.source.MkdirAll(n, p)
}
func (r *RegexpFs) Create(name string) (File, error) {
if err := r.matchesName(name); err != nil {
return nil, err
}
return r.source.Create(name)
}
func (f *RegexpFile) Close() error {
return f.f.Close()
}
func (f *RegexpFile) Read(s []byte) (int, error) {
return f.f.Read(s)
}
func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
return f.f.ReadAt(s, o)
}
func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
return f.f.Seek(o, w)
}
func (f *RegexpFile) Write(s []byte) (int, error) {
return f.f.Write(s)
}
func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
return f.f.WriteAt(s, o)
}
func (f *RegexpFile) Name() string {
return f.f.Name()
}
func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
var rfi []os.FileInfo
rfi, err = f.f.Readdir(c)
if err != nil {
return nil, err
}
for _, i := range rfi {
if i.IsDir() || f.re.MatchString(i.Name()) {
fi = append(fi, i)
}
}
return fi, nil
}
func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
fi, err := f.Readdir(c)
if err != nil {
return nil, err
}
for _, s := range fi {
n = append(n, s.Name())
}
return n, nil
}
func (f *RegexpFile) Stat() (os.FileInfo, error) {
return f.f.Stat()
}
func (f *RegexpFile) Sync() error {
return f.f.Sync()
}
func (f *RegexpFile) Truncate(s int64) error {
return f.f.Truncate(s)
}
func (f *RegexpFile) WriteString(s string) (int, error) {
return f.f.WriteString(s)
}
| vendor/github.com/spf13/afero/regexpfs.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00038101067184470594,
0.00018196686869487166,
0.000166893849382177,
0.00017257823492400348,
0.000043541265767998993
] |
{
"id": 9,
"code_window": [
"\n",
"\tnodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)\n",
"\tindex := 0\n",
"\tsum := 0\n",
"\tfor k, strategy := range p.countToStrategy {\n",
"\t\tsum += k\n",
"\t\tfor ; index < sum; index++ {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep"
],
"after_edit": [
"\tfor _, v := range p.countToStrategy {\n",
"\t\tsum += v.Count\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 82
} | apiVersion: v1
kind: Service
metadata:
name: elasticsearch-discovery
labels:
component: elasticsearch
role: master
spec:
selector:
component: elasticsearch
role: master
ports:
- name: transport
port: 9300
protocol: TCP
| examples/elasticsearch/production_cluster/es-discovery-svc.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017761839262675494,
0.00017692369874566793,
0.00017622901941649616,
0.00017692369874566793,
6.94686605129391e-7
] |
{
"id": 10,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
utiluuid "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
)
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
type DensityTestConfig struct {
Configs []testutils.RCConfig
Client *client.Client
ClientSet internalclientset.Interface
Namespace string
PollInterval time.Duration
PodCount int
Timeout time.Duration
}
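// density30AddonResourceVerifier returns per-component resource constraints
// scaled to the cluster size and provider.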
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
var apiserverMem uint64
var controllerMem uint64
var schedulerMem uint64
apiserverCPU := math.MaxFloat32
apiserverMem = math.MaxUint64
controllerCPU := math.MaxFloat32
controllerMem = math.MaxUint64
schedulerCPU := math.MaxFloat32
schedulerMem = math.MaxUint64
framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider)
if framework.ProviderIs("kubemark") {
if numNodes <= 5 {
apiserverCPU = 0.35
apiserverMem = 150 * (1024 * 1024)
controllerCPU = 0.1
controllerMem = 100 * (1024 * 1024)
schedulerCPU = 0.05
schedulerMem = 50 * (1024 * 1024)
} else if numNodes <= 100 {
apiserverCPU = 1.5
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.75
controllerMem = 750 * (1024 * 1024)
schedulerCPU = 0.75
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 500 {
apiserverCPU = 2.5
apiserverMem = 3400 * (1024 * 1024)
controllerCPU = 1.3
controllerMem = 1100 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 1000 {
apiserverCPU = 4
apiserverMem = 4000 * (1024 * 1024)
controllerCPU = 3
controllerMem = 2000 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 750 * (1024 * 1024)
}
} else {
if numNodes <= 100 {
// TODO: Investigate higher apiserver consumption and
// potentially revert to 1.5cpu and 1.3GB - see #30871
apiserverCPU = 1.8
apiserverMem = 2200 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 300 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 150 * (1024 * 1024)
}
}
constraints := make(map[string]framework.ResourceConstraint)
constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 250 * (1024 * 1024),
}
constraints["elasticsearch-logging"] = framework.ResourceConstraint{
CPUConstraint: 2,
// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
MemoryConstraint: 5000 * (1024 * 1024),
}
constraints["heapster"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 1800 * (1024 * 1024),
}
constraints["kibana-logging"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 100 * (1024 * 1024),
}
constraints["kube-proxy"] = framework.ResourceConstraint{
CPUConstraint: 0.1,
MemoryConstraint: 20 * (1024 * 1024),
}
constraints["l7-lb-controller"] = framework.ResourceConstraint{
CPUConstraint: 0.15,
MemoryConstraint: 60 * (1024 * 1024),
}
constraints["influxdb"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 500 * (1024 * 1024),
}
constraints["kube-apiserver"] = framework.ResourceConstraint{
CPUConstraint: apiserverCPU,
MemoryConstraint: apiserverMem,
}
constraints["kube-controller-manager"] = framework.ResourceConstraint{
CPUConstraint: controllerCPU,
MemoryConstraint: controllerMem,
}
constraints["kube-scheduler"] = framework.ResourceConstraint{
CPUConstraint: schedulerCPU,
MemoryConstraint: schedulerMem,
}
return constraints
}
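// logPodStartupStatus periodically logs the startup status of pods matching
// observedLabels until stopCh is closed.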
func logPodStartupStatus(c *client.Client, expectedPods int, ns string, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := testutils.NewPodStore(c, ns, label, fields.Everything())
defer podStore.Stop()
ticker := time.NewTicker(period)
defer ticker.Stop()
for {
select {
case <-ticker.C:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
case <-stopCh:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
return
}
}
}
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
defer GinkgoRecover()
// Create a listener for events.
	// eLock is a lock that protects the events slice.
var eLock sync.Mutex
	events := make([]*api.Event, 0)
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dtc.Client.Events(dtc.Namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dtc.Client.Events(dtc.Namespace).Watch(options)
},
},
&api.Event{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
eLock.Lock()
defer eLock.Unlock()
events = append(events, obj.(*api.Event))
},
},
)
stop := make(chan struct{})
go controller.Run(stop)
	// Create a listener for API updates.
	// uLock is a lock that protects updateCount.
var uLock sync.Mutex
updateCount := 0
label := labels.SelectorFromSet(labels.Set(map[string]string{"type": "densityPod"}))
_, updateController := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = label
return dtc.Client.Pods(dtc.Namespace).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = label
return dtc.Client.Pods(dtc.Namespace).Watch(options)
},
},
&api.Pod{},
0,
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(_, _ interface{}) {
uLock.Lock()
defer uLock.Unlock()
updateCount++
},
},
)
go updateController.Run(stop)
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
rcConfig := dtc.Configs[i]
go func() {
defer GinkgoRecover()
framework.ExpectNoError(framework.RunRC(rcConfig))
wg.Done()
}()
}
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.Client, dtc.PodCount, dtc.Namespace, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
	startupTime := time.Since(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
By("Waiting for all events to be recorded")
last := -1
current := len(events)
lastCount := -1
currentCount := updateCount
for start := time.Now(); (last < current || lastCount < currentCount) && time.Since(start) < dtc.Timeout; time.Sleep(10 * time.Second) {
func() {
eLock.Lock()
defer eLock.Unlock()
last = current
current = len(events)
}()
func() {
uLock.Lock()
defer uLock.Unlock()
lastCount = currentCount
currentCount = updateCount
}()
}
close(stop)
if current != last {
framework.Logf("Warning: Not all events were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
}
framework.Logf("Found %d events", current)
if currentCount != lastCount {
framework.Logf("Warning: Not all updates were recorded after waiting %.2f minutes", dtc.Timeout.Minutes())
}
framework.Logf("Found %d updates", currentCount)
// Tune the threshold for allowed failures.
badEvents := framework.BadEvents(events)
Expect(badEvents).NotTo(BeNumerically(">", int(math.Floor(0.01*float64(dtc.PodCount)))))
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
podList, err := dtc.Client.Pods(api.NamespaceAll).List(api.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string)
for _, pod := range podList.Items {
if pod.Namespace == api.NamespaceSystem {
systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
} else {
pausePodAllocation[pod.Spec.NodeName]++
}
}
nodeNames := make([]string, 0)
for k := range pausePodAllocation {
nodeNames = append(nodeNames, k)
}
sort.Strings(nodeNames)
for _, node := range nodeNames {
framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
}
return startupTime
}
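// cleanupDensityTest deletes the replication controllers created by the test;
// with the garbage collector enabled, only the RCs are removed and their pods
// are left to the collector.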
func cleanupDensityTest(dtc DensityTestConfig) {
defer GinkgoRecover()
By("Deleting ReplicationController")
	// We explicitly delete all pods so that the API calls needed for deletion are accounted for in metrics.
for i := range dtc.Configs {
rcName := dtc.Configs[i].Name
rc, err := dtc.Client.ReplicationControllers(dtc.Namespace).Get(rcName)
if err == nil && rc.Spec.Replicas != 0 {
if framework.TestContext.GarbageCollectorEnabled {
By("Cleaning up only the replication controller, garbage collector will clean up the pods")
err := framework.DeleteRCAndWaitForGC(dtc.Client, dtc.Namespace, rcName)
framework.ExpectNoError(err)
} else {
By("Cleaning up the replication controller and pods")
err := framework.DeleteRCAndPods(dtc.Client, dtc.ClientSet, dtc.Namespace, rcName)
framework.ExpectNoError(err)
}
}
}
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 Nodes) clusters. For smaller ones
// results will not be representative of control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() {
var c *client.Client
var nodeCount int
var RCName string
var additionalPodsPrefix string
var ns string
var uuid string
var e2eStartupTime time.Duration
var totalPods int
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *api.NodeList
var masters sets.String
// Gathers data prior to framework namespace teardown
AfterEach(func() {
		saturationThreshold := time.Duration(totalPods/MinPodsPerSecondThroughput) * time.Second
if saturationThreshold < MinSaturationThreshold {
saturationThreshold = MinSaturationThreshold
}
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
saturationData := framework.SaturationTime{
TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount,
NumberOfPods: totalPods,
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
}
framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
// Verify latency metrics.
highLatencyRequests, err := framework.HighLatencyRequests(c)
framework.ExpectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
// Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver.
framework.ExpectNoError(framework.VerifySchedulerLatency(c))
})
	// Explicitly put here so the namespace is deleted at the end of the test
// (after measuring latency metrics, etc.).
f := framework.NewDefaultFramework("density")
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
c = f.Client
ns = f.Namespace.Name
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c))
masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()
// Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all
	// terminating namespaces to be fully deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
uuid = string(utiluuid.NewUUID())
framework.ExpectNoError(framework.ResetMetrics(c))
framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
framework.Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items {
var internalIP, externalIP string
for _, address := range node.Status.Addresses {
if address.Type == api.NodeInternalIP {
internalIP = address.Address
}
if address.Type == api.NodeExternalIP {
externalIP = address.Address
}
}
framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
}
})
type Density struct {
// Controls if e2e latency tests should be run (they are slow)
runLatencyTest bool
podsPerNode int
// Controls how often the apiserver is polled for pods
interval time.Duration
}
densityTests := []Density{
// TODO: Expose runLatencyTest as ginkgo flag.
{podsPerNode: 3, runLatencyTest: false, interval: 10 * time.Second},
{podsPerNode: 30, runLatencyTest: true, interval: 10 * time.Second},
{podsPerNode: 50, runLatencyTest: false, interval: 10 * time.Second},
{podsPerNode: 95, runLatencyTest: true, interval: 10 * time.Second},
{podsPerNode: 100, runLatencyTest: false, interval: 10 * time.Second},
}
for _, testArg := range densityTests {
feature := "ManualPerformance"
switch testArg.podsPerNode {
case 30:
feature = "Performance"
case 95:
feature = "HighDensityPerformance"
}
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node", feature, testArg.podsPerNode)
itArg := testArg
It(name, func() {
nodePreparer := framework.NewE2ETestNodePreparer(
f.ClientSet,
map[int]testutils.PrepareNodeStrategy{nodeCount: &testutils.TrivialNodePrepareStrategy{}},
)
framework.ExpectNoError(nodePreparer.PrepareNodes())
defer nodePreparer.CleanupNodes()
podsPerNode := itArg.podsPerNode
if podsPerNode == 30 {
f.AddonResourceConstraints = func() map[string]framework.ResourceConstraint { return density30AddonResourceVerifier(nodeCount) }()
}
totalPods = podsPerNode * nodeCount
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
timeout := 10 * time.Minute
// TODO: loop to podsPerNode instead of 1 when we're ready.
			numberOfRCs := 1
			RCConfigs := make([]testutils.RCConfig, numberOfRCs)
			for i := 0; i < numberOfRCs; i++ {
RCName := "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = testutils.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.Client),
Name: RCName,
Namespace: ns,
Labels: map[string]string{"type": "densityPod"},
PollInterval: itArg.interval,
PodStatusFile: fileHndl,
					Replicas: (totalPods + numberOfRCs - 1) / numberOfRCs,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
}
}
dConfig := DensityTestConfig{
Client: c,
ClientSet: f.ClientSet,
Configs: RCConfigs,
PodCount: totalPods,
Namespace: ns,
PollInterval: itArg.interval,
Timeout: timeout,
}
e2eStartupTime = runDensityTest(dConfig)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
				createTimes := make(map[string]unversioned.Time)
				nodes := make(map[string]string)
				scheduleTimes := make(map[string]unversioned.Time)
				runTimes := make(map[string]unversioned.Time)
				watchTimes := make(map[string]unversioned.Time)
var mutex sync.Mutex
checkPod := func(p *api.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == api.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = unversioned.Now()
createTimes[p.Name] = p.CreationTimestamp
nodes[p.Name] = p.Spec.NodeName
var startTime unversioned.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTime.Before(cs.State.Running.StartedAt) {
startTime = cs.State.Running.StartedAt
}
}
}
if startTime != unversioned.NewTime(time.Time{}) {
runTimes[p.Name] = startTime
} else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
}
}
}
}
additionalPodsPrefix = "density-latency-pod"
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
return c.Pods(ns).Watch(options)
},
},
&api.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod)
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*api.Pod)
Expect(ok).To(Equal(true))
go checkPod(p)
},
},
)
stopCh := make(chan struct{})
go controller.Run(stopCh)
// Create some additional pods with throughput ~5 pods/sec.
var wg sync.WaitGroup
wg.Add(nodeCount)
				// Explicitly set requests here. Doing so triggers the spreading
				// priority function when a pod is scheduled to a node, which in
				// turn spreads the latency pods more evenly between nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
By("Waiting for all Pods begin observed by the watch...")
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < timeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for _, item := range latencyPodsStore.List() {
pod := item.(*api.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": ns,
"source": api.DefaultSchedulerName,
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimes[k] = event.FirstTimestamp
break
}
}
}
scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]framework.PodLatencyData, 0)
for name, create := range createTimes {
sched, ok := scheduleTimes[name]
Expect(ok).To(Equal(true))
run, ok := runTimes[name]
Expect(ok).To(Equal(true))
watch, ok := watchTimes[name]
Expect(ok).To(Equal(true))
node, ok := nodes[name]
Expect(ok).To(Equal(true))
scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
}
sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
By("Removing additional replication controllers")
deleteRC := func(i int) {
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, ns, name))
}
workqueue.Parallelize(16, nodeCount, deleteRC)
}
cleanupDensityTest(dConfig)
})
}
// Calculate total number of pods from each node's max-pod
It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
totalPods = 0
for _, n := range nodes.Items {
totalPods += int(n.Status.Capacity.Pods().Value())
}
totalPods -= framework.WaitForStableCluster(c, masters)
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
rcCnt := 1
RCConfigs := make([]testutils.RCConfig, rcCnt)
		podsPerRC := totalPods / rcCnt
		for i := 0; i < rcCnt; i++ {
			if i == rcCnt-1 {
				podsPerRC += totalPods % rcCnt
}
RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = testutils.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.Client),
Name: RCName,
Namespace: ns,
Labels: map[string]string{"type": "densityPod"},
PollInterval: 10 * time.Second,
PodStatusFile: fileHndl,
Replicas: podsPerRC,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
}
}
dConfig := DensityTestConfig{
Client: c,
ClientSet: f.ClientSet,
Configs: RCConfigs,
PodCount: totalPods,
Namespace: ns,
PollInterval: 10 * time.Second,
Timeout: 10 * time.Minute,
}
e2eStartupTime = runDensityTest(dConfig)
cleanupDensityTest(dConfig)
})
})
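// createRunningPodFromRC creates a single-replica RC with the given resource
// requests and blocks until its pod is observed running.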
func createRunningPodFromRC(wg *sync.WaitGroup, c *client.Client, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,
"name": name,
}
rc := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: labels,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: name,
Image: image,
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: cpuRequest,
api.ResourceMemory: memRequest,
},
},
},
},
DNSPolicy: api.DNSDefault,
},
},
},
}
_, err := c.ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
framework.Logf("Found pod '%s' running", name)
}
| test/e2e/density.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0015059313736855984,
0.0001942642848007381,
0.00015850634372327477,
0.00017313085845671594,
0.00015451032959390432
] |
{
"id": 10,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
api "k8s.io/client-go/pkg/api"
v1 "k8s.io/client-go/pkg/api/v1"
watch "k8s.io/client-go/pkg/watch"
)
// SecretsGetter has a method to return a SecretInterface.
// A group's client should implement this interface.
type SecretsGetter interface {
Secrets(namespace string) SecretInterface
}
// SecretInterface has methods to work with Secret resources.
type SecretInterface interface {
Create(*v1.Secret) (*v1.Secret, error)
Update(*v1.Secret) (*v1.Secret, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string) (*v1.Secret, error)
List(opts v1.ListOptions) (*v1.SecretList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error)
SecretExpansion
}
// secrets implements SecretInterface
type secrets struct {
client *CoreClient
ns string
}
// newSecrets returns a Secrets
func newSecrets(c *CoreClient, namespace string) *secrets {
return &secrets{
client: c,
ns: namespace,
}
}
// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any.
func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) {
result = &v1.Secret{}
err = c.client.Post().
Namespace(c.ns).
Resource("secrets").
Body(secret).
Do().
Into(result)
return
}
// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any.
func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) {
result = &v1.Secret{}
err = c.client.Put().
Namespace(c.ns).
Resource("secrets").
Name(secret.Name).
Body(secret).
Do().
Into(result)
return
}
// Delete takes name of the secret and deletes it. Returns an error if one occurs.
func (c *secrets) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("secrets").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *secrets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("secrets").
VersionedParams(&listOptions, api.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any.
func (c *secrets) Get(name string) (result *v1.Secret, err error) {
result = &v1.Secret{}
err = c.client.Get().
Namespace(c.ns).
Resource("secrets").
Name(name).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Secrets that match those selectors.
func (c *secrets) List(opts v1.ListOptions) (result *v1.SecretList, err error) {
result = &v1.SecretList{}
err = c.client.Get().
Namespace(c.ns).
Resource("secrets").
VersionedParams(&opts, api.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested secrets.
func (c *secrets) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.client.Get().
Prefix("watch").
Namespace(c.ns).
Resource("secrets").
VersionedParams(&opts, api.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched secret.
func (c *secrets) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) {
result = &v1.Secret{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("secrets").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
| staging/src/k8s.io/client-go/kubernetes/typed/core/v1/secret.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00018052614177577198,
0.0001713923702482134,
0.00016501177742611617,
0.0001699011481832713,
0.000005264164883556077
] |
{
"id": 10,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
restclient "k8s.io/kubernetes/pkg/client/restclient"
)
// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface.
// A group's client should implement this interface.
type LocalSubjectAccessReviewsGetter interface {
LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface
}
// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources.
type LocalSubjectAccessReviewInterface interface {
LocalSubjectAccessReviewExpansion
}
// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface
type localSubjectAccessReviews struct {
client restclient.Interface
ns string
}
// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews
func newLocalSubjectAccessReviews(c *AuthorizationClient, namespace string) *localSubjectAccessReviews {
return &localSubjectAccessReviews{
client: c.RESTClient(),
ns: namespace,
}
}
| pkg/client/clientset_generated/release_1_5/typed/authorization/v1beta1/localsubjectaccessreview.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0002601896703708917,
0.00019516734755598009,
0.000175059525645338,
0.00017970350745599717,
0.00003262820973759517
] |
{
"id": 10,
"code_window": [
"\t\tfor ; index < sum; index++ {\n",
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {\n",
"\t\t\t\tglog.Errorf(\"Aborting node preparation: %v\", err)\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {\n"
],
"file_path": "test/integration/framework/perf_utils.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"net/http/httptest"
"net/url"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
utiltesting "k8s.io/kubernetes/pkg/util/testing"
)
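// parseSelectorOrDie parses the field selector string and panics on failure.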
func parseSelectorOrDie(s string) fields.Selector {
selector, err := fields.ParseSelector(s)
if err != nil {
panic(err)
}
return selector
}
// buildQueryValues copies the supplied query parameters into a fresh url.Values.
func buildQueryValues(query url.Values) url.Values {
v := url.Values{}
if query != nil {
for key, values := range query {
for _, value := range values {
v.Add(key, value)
}
}
}
return v
}
func buildLocation(resourcePath string, query url.Values) string {
return resourcePath + "?" + query.Encode()
}
func TestListWatchesCanList(t *testing.T) {
fieldSelectorQueryParamName := unversioned.FieldSelectorQueryParam(registered.GroupOrDie(api.GroupName).GroupVersion.String())
table := []struct {
location string
resource string
namespace string
fieldSelector fields.Selector
}{
// Node
{
location: testapi.Default.ResourcePath("nodes", api.NamespaceAll, ""),
resource: "nodes",
namespace: api.NamespaceAll,
fieldSelector: parseSelectorOrDie(""),
},
// pod with "assigned" field selector.
{
location: buildLocation(
testapi.Default.ResourcePath("pods", api.NamespaceAll, ""),
buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})),
resource: "pods",
namespace: api.NamespaceAll,
fieldSelector: fields.Set{"spec.host": ""}.AsSelector(),
},
// pod in namespace "foo"
{
location: buildLocation(
testapi.Default.ResourcePath("pods", "foo", ""),
buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}})),
resource: "pods",
namespace: "foo",
fieldSelector: fields.Set{"spec.host": ""}.AsSelector(),
},
}
for _, item := range table {
handler := utiltesting.FakeHandler{
StatusCode: 500,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector)
// This test merely tests that the correct request is made.
lw.List(api.ListOptions{})
handler.ValidateRequest(t, item.location, "GET", nil)
}
}
func TestListWatchesCanWatch(t *testing.T) {
fieldSelectorQueryParamName := unversioned.FieldSelectorQueryParam(registered.GroupOrDie(api.GroupName).GroupVersion.String())
table := []struct {
rv string
location string
resource string
namespace string
fieldSelector fields.Selector
}{
// Node
{
location: buildLocation(
testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""),
buildQueryValues(url.Values{})),
rv: "",
resource: "nodes",
namespace: api.NamespaceAll,
fieldSelector: parseSelectorOrDie(""),
},
{
location: buildLocation(
testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""),
buildQueryValues(url.Values{"resourceVersion": []string{"42"}})),
rv: "42",
resource: "nodes",
namespace: api.NamespaceAll,
fieldSelector: parseSelectorOrDie(""),
},
// pod with "assigned" field selector.
{
location: buildLocation(
testapi.Default.ResourcePathWithPrefix("watch", "pods", api.NamespaceAll, ""),
buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})),
rv: "0",
resource: "pods",
namespace: api.NamespaceAll,
fieldSelector: fields.Set{"spec.host": ""}.AsSelector(),
},
// pod with namespace foo and assigned field selector
{
location: buildLocation(
testapi.Default.ResourcePathWithPrefix("watch", "pods", "foo", ""),
buildQueryValues(url.Values{fieldSelectorQueryParamName: []string{"spec.host="}, "resourceVersion": []string{"0"}})),
rv: "0",
resource: "pods",
namespace: "foo",
fieldSelector: fields.Set{"spec.host": ""}.AsSelector(),
},
}
for _, item := range table {
handler := utiltesting.FakeHandler{
StatusCode: 500,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := client.NewOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
lw := NewListWatchFromClient(client, item.resource, item.namespace, item.fieldSelector)
// This test merely tests that the correct request is made.
lw.Watch(api.ListOptions{ResourceVersion: item.rv})
handler.ValidateRequest(t, item.location, "GET", nil)
}
}
| pkg/client/cache/listwatch_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0002512804348953068,
0.00017725249927025288,
0.00016326966579072177,
0.0001735759142320603,
0.000018376993466517888
] |
{
"id": 12,
"code_window": [
"\tc := schedulerConfigFactory.Client\n",
"\n",
"\tnodePreparer := framework.NewIntegrationTestNodePreparer(\n",
"\t\tc,\n",
"\t\tmap[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\"scheduler-perf-\",\n",
"\t)\n",
"\tif err := nodePreparer.PrepareNodes(); err != nil {\n",
"\t\tglog.Fatalf(\"%v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/integration/scheduler_perf/scheduler_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
retries = 5
)
type IntegrationTestNodePreparer struct {
client clientset.Interface
countToStrategy map[int]testutils.PrepareNodeStrategy
nodeNamePrefix string
}
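// NewIntegrationTestNodePreparer returns a TestNodePreparer that fabricates
// ready nodes named with the given prefix.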
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
return &IntegrationTestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeNamePrefix: nodeNamePrefix,
}
}
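// PrepareNodes creates the requested number of ready nodes and applies each
// strategy to its share of them.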
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes := 0
for k := range p.countToStrategy {
numNodes += k
}
glog.Infof("Making %d nodes", numNodes)
baseNode := &api.Node{
ObjectMeta: api.ObjectMeta{
GenerateName: p.nodeNamePrefix,
},
Spec: api.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
api.ResourceCPU: resource.MustParse("4"),
api.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: api.NodeRunning,
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
},
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
index := 0
sum := 0
for k, strategy := range p.countToStrategy {
sum += k
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
}
}
return nil
}
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
}
| test/integration/framework/perf_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.012589576654136181,
0.0016670424956828356,
0.00016711662465240806,
0.00017955638759303838,
0.0035174544900655746
] |
{
"id": 12,
"code_window": [
"\tc := schedulerConfigFactory.Client\n",
"\n",
"\tnodePreparer := framework.NewIntegrationTestNodePreparer(\n",
"\t\tc,\n",
"\t\tmap[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\"scheduler-perf-\",\n",
"\t)\n",
"\tif err := nodePreparer.PrepareNodes(); err != nil {\n",
"\t\tglog.Fatalf(\"%v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/integration/scheduler_perf/scheduler_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
// TODO: remove this when Reflector takes an interface rather than a particular ListOptions as input parameter.
func VersionizeV1ListOptions(in api.ListOptions) (out v1.ListOptions) {
if in.LabelSelector != nil {
out.LabelSelector = in.LabelSelector.String()
} else {
out.LabelSelector = ""
}
if in.FieldSelector != nil {
out.FieldSelector = in.FieldSelector.String()
} else {
out.FieldSelector = ""
}
out.Watch = in.Watch
out.ResourceVersion = in.ResourceVersion
out.TimeoutSeconds = in.TimeoutSeconds
return out
}
| federation/pkg/federation-controller/util/versionize_listoptions.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017955638759303838,
0.00017458150978200138,
0.0001690523640718311,
0.00017546411254443228,
0.00000449835943072685
] |
{
"id": 12,
"code_window": [
"\tc := schedulerConfigFactory.Client\n",
"\n",
"\tnodePreparer := framework.NewIntegrationTestNodePreparer(\n",
"\t\tc,\n",
"\t\tmap[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\"scheduler-perf-\",\n",
"\t)\n",
"\tif err := nodePreparer.PrepareNodes(); err != nil {\n",
"\t\tglog.Fatalf(\"%v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/integration/scheduler_perf/scheduler_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | package jose
import (
"fmt"
)
type Verifier interface {
ID() string
Alg() string
Verify(sig []byte, data []byte) error
}
type Signer interface {
Verifier
Sign(data []byte) (sig []byte, err error)
}
func NewVerifier(jwk JWK) (Verifier, error) {
if jwk.Type != "RSA" {
return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
}
return NewVerifierRSA(jwk)
}
| staging/src/k8s.io/client-go/_vendor/github.com/coreos/go-oidc/jose/sig.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0002123144658980891,
0.00018312397878617048,
0.00016655365470796824,
0.0001705038157524541,
0.00002070369191642385
] |
{
"id": 12,
"code_window": [
"\tc := schedulerConfigFactory.Client\n",
"\n",
"\tnodePreparer := framework.NewIntegrationTestNodePreparer(\n",
"\t\tc,\n",
"\t\tmap[int]testutils.PrepareNodeStrategy{numNodes: &testutils.TrivialNodePrepareStrategy{}},\n",
"\t\t\"scheduler-perf-\",\n",
"\t)\n",
"\tif err := nodePreparer.PrepareNodes(); err != nil {\n",
"\t\tglog.Fatalf(\"%v\", err)\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},\n"
],
"file_path": "test/integration/scheduler_perf/scheduler_test.go",
"type": "replace",
"edit_start_line_idx": 85
} | kind: ReplicationController
apiVersion: v1
metadata:
name: guestbook
spec:
replicas: 3
template:
metadata:
labels:
component: guestbook
app: vitess
spec:
containers:
- name: guestbook
image: vitess/guestbook:v2.0.0-alpha5
ports:
- name: http-server
containerPort: 8080
resources:
limits:
memory: "128Mi"
cpu: "100m"
| examples/storage/vitess/guestbook-controller.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00017791555728763342,
0.00017650984227657318,
0.00017566332826390862,
0.0001759506412781775,
0.00000100088732324366
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"type TestNodePreparer interface {\n",
"\tPrepareNodes() error\n",
"\tCleanupNodes() error\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"type CountToStrategy struct {\n",
"\tCount int\n",
"\tStrategy PrepareNodeStrategy\n",
"}\n",
"\n"
],
"file_path": "test/utils/runners.go",
"type": "add",
"edit_start_line_idx": 610
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
e2eframework "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
)
const (
retries = 5
)
type IntegrationTestNodePreparer struct {
client clientset.Interface
countToStrategy map[int]testutils.PrepareNodeStrategy
nodeNamePrefix string
}
func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy map[int]testutils.PrepareNodeStrategy, nodeNamePrefix string) testutils.TestNodePreparer {
return &IntegrationTestNodePreparer{
client: client,
countToStrategy: countToStrategy,
nodeNamePrefix: nodeNamePrefix,
}
}
func (p *IntegrationTestNodePreparer) PrepareNodes() error {
numNodes := 0
for k := range p.countToStrategy {
numNodes += k
}
glog.Infof("Making %d nodes", numNodes)
baseNode := &api.Node{
ObjectMeta: api.ObjectMeta{
GenerateName: p.nodeNamePrefix,
},
Spec: api.NodeSpec{
// TODO: investigate why this is needed.
ExternalID: "foo",
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
api.ResourceCPU: resource.MustParse("4"),
api.ResourceMemory: resource.MustParse("32Gi"),
},
Phase: api.NodeRunning,
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
},
},
}
for i := 0; i < numNodes; i++ {
if _, err := p.client.Core().Nodes().Create(baseNode); err != nil {
glog.Fatalf("Error creating node: %v", err)
}
}
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
index := 0
sum := 0
for k, strategy := range p.countToStrategy {
sum += k
for ; index < sum; index++ {
if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], strategy); err != nil {
glog.Errorf("Aborting node preparation: %v", err)
return err
}
}
}
return nil
}
func (p *IntegrationTestNodePreparer) CleanupNodes() error {
nodes := e2eframework.GetReadySchedulableNodesOrDie(p.client)
for i := range nodes.Items {
if err := p.client.Core().Nodes().Delete(nodes.Items[i].Name, &api.DeleteOptions{}); err != nil {
glog.Errorf("Error while deleting Node: %v", err)
}
}
return nil
}
| test/integration/framework/perf_utils.go | 1 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.9961634874343872,
0.0923302173614502,
0.00016696914099156857,
0.00017935122014023364,
0.28582963347435
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"type TestNodePreparer interface {\n",
"\tPrepareNodes() error\n",
"\tCleanupNodes() error\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"type CountToStrategy struct {\n",
"\tCount int\n",
"\tStrategy PrepareNodeStrategy\n",
"}\n",
"\n"
],
"file_path": "test/utils/runners.go",
"type": "add",
"edit_start_line_idx": 610
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package meta_test
import (
"fmt"
"reflect"
"strings"
"testing"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/util/sets"
)
// These types do not follow the list convention as documented in
// docs/devel/api-convention.md
var listTypeExceptions = sets.NewString("APIGroupList", "APIResourceList")
func validateListType(target reflect.Type) error {
// exceptions
if listTypeExceptions.Has(target.Name()) {
return nil
}
hasListSuffix := strings.HasSuffix(target.Name(), "List")
hasMetadata := false
hasItems := false
for i := 0; i < target.NumField(); i++ {
field := target.Field(i)
tag := field.Tag.Get("json")
switch {
case strings.HasPrefix(tag, "metadata"):
hasMetadata = true
case tag == "items":
hasItems = true
if field.Type.Kind() != reflect.Slice {
return fmt.Errorf("Expected items to be slice, got %s", field.Type.Kind())
}
}
}
if hasListSuffix && !hasMetadata {
return fmt.Errorf("Expected type %s to contain \"metadata\"", target.Name())
}
if hasListSuffix && !hasItems {
return fmt.Errorf("Expected type %s to contain \"items\"", target.Name())
}
// if a type contains field Items with JSON tag "items", its name should end with List.
if !hasListSuffix && hasItems {
return fmt.Errorf("Type %s has Items, its name is expected to end with \"List\"", target.Name())
}
return nil
}
// TestListTypes verifies that no external type violates the api convention of
// list types.
func TestListTypes(t *testing.T) {
for groupKey, group := range testapi.Groups {
for kind, target := range group.ExternalTypes() {
t.Logf("working on %v in %v", kind, groupKey)
err := validateListType(target)
if err != nil {
t.Error(err)
}
}
}
}
type WithoutMetaDataList struct {
unversioned.TypeMeta `json:",inline"`
unversioned.ListMeta
Items []interface{} `json:"items"`
}
type WithoutItemsList struct {
unversioned.TypeMeta `json:",inline"`
unversioned.ListMeta `json:"metadata,omitempty"`
}
type WrongItemsJSONTagList struct {
unversioned.TypeMeta `json:",inline"`
unversioned.ListMeta `json:"metadata,omitempty"`
Items []interface{} `json:"items,omitempty"`
}
// If a type has Items, its name should end with "List"
type ListWithWrongName struct {
unversioned.TypeMeta `json:",inline"`
unversioned.ListMeta `json:"metadata,omitempty"`
Items []interface{} `json:"items"`
}
// TestValidateListType verifies the validateListType function reports errors on
// types that violate the api convention.
func TestValidateListType(t *testing.T) {
var testTypes = []interface{}{
WithoutMetaDataList{},
WithoutItemsList{},
WrongItemsJSONTagList{},
ListWithWrongName{},
}
for _, testType := range testTypes {
err := validateListType(reflect.TypeOf(testType))
if err == nil {
t.Errorf("Expected error")
}
}
}
| pkg/api/meta/scheme_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0009296267526224256,
0.000235154468100518,
0.0001669420744292438,
0.0001710576325422153,
0.0002010158495977521
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"type TestNodePreparer interface {\n",
"\tPrepareNodes() error\n",
"\tCleanupNodes() error\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"type CountToStrategy struct {\n",
"\tCount int\n",
"\tStrategy PrepareNodeStrategy\n",
"}\n",
"\n"
],
"file_path": "test/utils/runners.go",
"type": "add",
"edit_start_line_idx": 610
} | package procfs
import (
"bufio"
"fmt"
"strconv"
"strings"
)
// Stat represents kernel/system statistics.
type Stat struct {
// Boot time in seconds since the Epoch.
BootTime int64
}
// NewStat returns kernel/system statistics read from /proc/stat.
func NewStat() (Stat, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return Stat{}, err
}
return fs.NewStat()
}
// NewStat returns information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
f, err := fs.open("stat")
if err != nil {
return Stat{}, err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
line := s.Text()
if !strings.HasPrefix(line, "btime") {
continue
}
fields := strings.Fields(line)
if len(fields) != 2 {
return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
}
i, err := strconv.ParseInt(fields[1], 10, 32)
if err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
}
return Stat{BootTime: i}, nil
}
if err := s.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
}
return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
}
| vendor/github.com/prometheus/procfs/stat.go | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.0003825122839771211,
0.00020568691252265126,
0.0001659509289311245,
0.00017089737229980528,
0.0000791221100371331
] |
{
"id": 13,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"type TestNodePreparer interface {\n",
"\tPrepareNodes() error\n",
"\tCleanupNodes() error\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"type CountToStrategy struct {\n",
"\tCount int\n",
"\tStrategy PrepareNodeStrategy\n",
"}\n",
"\n"
],
"file_path": "test/utils/runners.go",
"type": "add",
"edit_start_line_idx": 610
} | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
all: push
# 0.0 shouldn't clobber any released builds
TAG = 1.0
PREFIX = gcr.io/google_containers/clusterapi-tester
main: main.go
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' -o main ./main.go
container: main
docker build -t $(PREFIX):$(TAG) .
push: container
gcloud docker -- push $(PREFIX):$(TAG)
clean:
rm -f main
| test/images/clusterapi-tester/Makefile | 0 | https://github.com/kubernetes/kubernetes/commit/aeba0f1dc4f3e767fca9dcd5325333eb2eee0480 | [
0.00018015557725448161,
0.00017549149924889207,
0.00016886596858967096,
0.00017647224012762308,
0.000004298708063288359
] |
{
"id": 0,
"code_window": [
"\treturn &lbRule, nil\n",
"}\n",
"\n",
"func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {\n",
"\tvalue := v.(string)\n",
"\tif !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z_0-9.-]+$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 326
} | package azurerm
import (
"fmt"
"log"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/riviera/azure"
)
func resourceArmLoadBalancerRule() *schema.Resource {
return &schema.Resource{
Create: resourceArmLoadBalancerRuleCreate,
Read: resourceArmLoadBalancerRuleRead,
Update: resourceArmLoadBalancerRuleCreate,
Delete: resourceArmLoadBalancerRuleDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArmLoadBalancerRuleName,
},
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: azureRMNormalizeLocation,
},
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"loadbalancer_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"frontend_ip_configuration_name": {
Type: schema.TypeString,
Required: true,
},
"frontend_ip_configuration_id": {
Type: schema.TypeString,
Computed: true,
},
"backend_address_pool_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Required: true,
},
"frontend_port": {
Type: schema.TypeInt,
Required: true,
},
"backend_port": {
Type: schema.TypeInt,
Required: true,
},
"probe_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"enable_floating_ip": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout_in_minutes": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"load_distribution": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
_, _, exists = findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
}
newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer)
if err != nil {
return errwrap.Wrapf("Error Exanding LoadBalancer Rule {{err}}", err)
}
lbRules := append(*loadBalancer.Properties.LoadBalancingRules, *newLbRule)
loadBalancer.Properties.LoadBalancingRules = &lbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
var rule_id string
for _, LoadBalancingRule := range *(*read.Properties).LoadBalancingRules {
if *LoadBalancingRule.Name == d.Get("name").(string) {
rule_id = *LoadBalancingRule.ID
}
}
if rule_id != "" {
d.SetId(rule_id)
} else {
return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id)
}
log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: []string{"Succeeded"},
Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err)
}
return resourceArmLoadBalancerRuleRead(d, meta)
}
func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error {
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
configs := *loadBalancer.Properties.LoadBalancingRules
for _, config := range configs {
if *config.Name == d.Get("name").(string) {
d.Set("name", config.Name)
d.Set("protocol", config.Properties.Protocol)
d.Set("frontend_port", config.Properties.FrontendPort)
d.Set("backend_port", config.Properties.BackendPort)
if config.Properties.EnableFloatingIP != nil {
d.Set("enable_floating_ip", config.Properties.EnableFloatingIP)
}
if config.Properties.IdleTimeoutInMinutes != nil {
d.Set("idle_timeout_in_minutes", config.Properties.IdleTimeoutInMinutes)
}
if config.Properties.FrontendIPConfiguration != nil {
d.Set("frontend_ip_configuration_id", config.Properties.FrontendIPConfiguration.ID)
}
if config.Properties.BackendAddressPool != nil {
d.Set("backend_address_pool_id", config.Properties.BackendAddressPool.ID)
}
if config.Properties.Probe != nil {
d.Set("probe_id", config.Properties.Probe.ID)
}
if config.Properties.LoadDistribution != "" {
d.Set("load_distribution", config.Properties.LoadDistribution)
}
}
}
return nil
}
func resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
return nil
}
_, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if !exists {
return nil
}
oldLbRules := *loadBalancer.Properties.LoadBalancingRules
newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...)
loadBalancer.Properties.LoadBalancingRules = &newLbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
return nil
}
func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) {
properties := network.LoadBalancingRulePropertiesFormat{
Protocol: network.TransportProtocol(d.Get("protocol").(string)),
FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))),
BackendPort: azure.Int32(int32(d.Get("backend_port").(int))),
EnableFloatingIP: azure.Bool(d.Get("enable_floating_ip").(bool)),
}
if v, ok := d.GetOk("idle_timeout_in_minutes"); ok {
properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int)))
}
if v := d.Get("load_distribution").(string); v != "" {
properties.LoadDistribution = network.LoadDistribution(v)
}
if v := d.Get("frontend_ip_configuration_name").(string); v != "" {
rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v)
if !exists {
return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v)
}
feip := network.SubResource{
ID: rule.ID,
}
properties.FrontendIPConfiguration = &feip
}
if v := d.Get("backend_address_pool_id").(string); v != "" {
beAP := network.SubResource{
ID: &v,
}
properties.BackendAddressPool = &beAP
}
if v := d.Get("probe_id").(string); v != "" {
pid := network.SubResource{
ID: &v,
}
properties.Probe = &pid
}
lbRule := network.LoadBalancingRule{
Name: azure.String(d.Get("name").(string)),
Properties: &properties,
}
return &lbRule, nil
}
func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only word characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 80 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 80 characters: %q", k, value))
}
if len(value) == 0 {
errors = append(errors, fmt.Errorf(
"%q cannot be an empty string: %q", k, value))
}
if !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must end with a word character: %q", k, value))
}
if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must start with a word character: %q", k, value))
}
return
}
| builtin/providers/azurerm/resource_arm_loadbalancer_rule.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.9991984963417053,
0.2618231177330017,
0.00016740562568884343,
0.008637451566755772,
0.4143628478050232
] |
{
"id": 0,
"code_window": [
"\treturn &lbRule, nil\n",
"}\n",
"\n",
"func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {\n",
"\tvalue := v.(string)\n",
"\tif !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z_0-9.-]+$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 326
} | package cloudstack
import "github.com/xanzy/go-cloudstack/cloudstack"
// Config is the configuration structure used to instantiate a
// new CloudStack client.
type Config struct {
APIURL string
APIKey string
SecretKey string
HTTPGETOnly bool
Timeout int64
}
// NewClient returns a new CloudStack client.
func (c *Config) NewClient() (*cloudstack.CloudStackClient, error) {
cs := cloudstack.NewAsyncClient(c.APIURL, c.APIKey, c.SecretKey, false)
cs.HTTPGETOnly = c.HTTPGETOnly
cs.AsyncTimeout(c.Timeout)
return cs, nil
}
| builtin/providers/cloudstack/config.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00016958480409812182,
0.00016793205577414483,
0.00016665691509842873,
0.00016755444812588394,
0.0000012247651284269523
] |
{
"id": 0,
"code_window": [
"\treturn &lbRule, nil\n",
"}\n",
"\n",
"func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {\n",
"\tvalue := v.(string)\n",
"\tif !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z_0-9.-]+$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 326
} | package rules
import "github.com/rackspace/gophercloud"
const rootPath = "security-group-rules"
func rootURL(c *gophercloud.ServiceClient) string {
return c.ServiceURL(rootPath)
}
func resourceURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL(rootPath, id)
}
| vendor/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00016845764184836298,
0.00016745945322327316,
0.0001664612500462681,
0.00016745945322327316,
9.981959010474384e-7
] |
{
"id": 0,
"code_window": [
"\treturn &lbRule, nil\n",
"}\n",
"\n",
"func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {\n",
"\tvalue := v.(string)\n",
"\tif !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z_0-9.-]+$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 326
} | Copyright 2012-2013 Rackspace, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use
this file except in compliance with the License. You may obtain a copy of the
License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
| vendor/github.com/rackspace/gophercloud/LICENSE | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.000179377049789764,
0.00017454285989515483,
0.00016566966951359063,
0.000175447145011276,
0.0000030794160466030007
] |
{
"id": 1,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"only word characters and hyphens allowed in %q: %q\",\n",
"\t\t\tk, value))\n",
"\t}\n",
"\n",
"\tif len(value) > 80 {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"only word characters, numbers, underscores, periods, and hyphens allowed in %q: %q\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 328
} | package azurerm
import (
"fmt"
"log"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/riviera/azure"
)
func resourceArmLoadBalancerRule() *schema.Resource {
return &schema.Resource{
Create: resourceArmLoadBalancerRuleCreate,
Read: resourceArmLoadBalancerRuleRead,
Update: resourceArmLoadBalancerRuleCreate,
Delete: resourceArmLoadBalancerRuleDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArmLoadBalancerRuleName,
},
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: azureRMNormalizeLocation,
},
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"loadbalancer_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"frontend_ip_configuration_name": {
Type: schema.TypeString,
Required: true,
},
"frontend_ip_configuration_id": {
Type: schema.TypeString,
Computed: true,
},
"backend_address_pool_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Required: true,
},
"frontend_port": {
Type: schema.TypeInt,
Required: true,
},
"backend_port": {
Type: schema.TypeInt,
Required: true,
},
"probe_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"enable_floating_ip": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout_in_minutes": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"load_distribution": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
_, _, exists = findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
}
newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer)
if err != nil {
return errwrap.Wrapf("Error Exanding LoadBalancer Rule {{err}}", err)
}
lbRules := append(*loadBalancer.Properties.LoadBalancingRules, *newLbRule)
loadBalancer.Properties.LoadBalancingRules = &lbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
var rule_id string
for _, LoadBalancingRule := range *(*read.Properties).LoadBalancingRules {
if *LoadBalancingRule.Name == d.Get("name").(string) {
rule_id = *LoadBalancingRule.ID
}
}
if rule_id != "" {
d.SetId(rule_id)
} else {
return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id)
}
log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: []string{"Succeeded"},
Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err)
}
return resourceArmLoadBalancerRuleRead(d, meta)
}
func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error {
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
configs := *loadBalancer.Properties.LoadBalancingRules
for _, config := range configs {
if *config.Name == d.Get("name").(string) {
d.Set("name", config.Name)
d.Set("protocol", config.Properties.Protocol)
d.Set("frontend_port", config.Properties.FrontendPort)
d.Set("backend_port", config.Properties.BackendPort)
if config.Properties.EnableFloatingIP != nil {
d.Set("enable_floating_ip", config.Properties.EnableFloatingIP)
}
if config.Properties.IdleTimeoutInMinutes != nil {
d.Set("idle_timeout_in_minutes", config.Properties.IdleTimeoutInMinutes)
}
if config.Properties.FrontendIPConfiguration != nil {
d.Set("frontend_ip_configuration_id", config.Properties.FrontendIPConfiguration.ID)
}
if config.Properties.BackendAddressPool != nil {
d.Set("backend_address_pool_id", config.Properties.BackendAddressPool.ID)
}
if config.Properties.Probe != nil {
d.Set("probe_id", config.Properties.Probe.ID)
}
if config.Properties.LoadDistribution != "" {
d.Set("load_distribution", config.Properties.LoadDistribution)
}
}
}
return nil
}
func resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
return nil
}
_, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if !exists {
return nil
}
oldLbRules := *loadBalancer.Properties.LoadBalancingRules
newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...)
loadBalancer.Properties.LoadBalancingRules = &newLbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
return nil
}
func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) {
properties := network.LoadBalancingRulePropertiesFormat{
Protocol: network.TransportProtocol(d.Get("protocol").(string)),
FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))),
BackendPort: azure.Int32(int32(d.Get("backend_port").(int))),
EnableFloatingIP: azure.Bool(d.Get("enable_floating_ip").(bool)),
}
if v, ok := d.GetOk("idle_timeout_in_minutes"); ok {
properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int)))
}
if v := d.Get("load_distribution").(string); v != "" {
properties.LoadDistribution = network.LoadDistribution(v)
}
if v := d.Get("frontend_ip_configuration_name").(string); v != "" {
rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v)
if !exists {
return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v)
}
feip := network.SubResource{
ID: rule.ID,
}
properties.FrontendIPConfiguration = &feip
}
if v := d.Get("backend_address_pool_id").(string); v != "" {
beAP := network.SubResource{
ID: &v,
}
properties.BackendAddressPool = &beAP
}
if v := d.Get("probe_id").(string); v != "" {
pid := network.SubResource{
ID: &v,
}
properties.Probe = &pid
}
lbRule := network.LoadBalancingRule{
Name: azure.String(d.Get("name").(string)),
Properties: &properties,
}
return &lbRule, nil
}
func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only word characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 80 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 80 characters: %q", k, value))
}
if len(value) == 0 {
errors = append(errors, fmt.Errorf(
"%q cannot be an empty string: %q", k, value))
}
if !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must end with a word character: %q", k, value))
}
if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must start with a word character: %q", k, value))
}
return
}
| builtin/providers/azurerm/resource_arm_loadbalancer_rule.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.9927577972412109,
0.0707031711935997,
0.00016449243412353098,
0.0001703175948932767,
0.2388887107372284
] |
{
"id": 1,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"only word characters and hyphens allowed in %q: %q\",\n",
"\t\t\tk, value))\n",
"\t}\n",
"\n",
"\tif len(value) > 80 {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"only word characters, numbers, underscores, periods, and hyphens allowed in %q: %q\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 328
} | <% wrap_layout :inner do %>
<% content_for :sidebar do %>
<div class="docs-sidebar hidden-print affix-top" role="complementary">
<ul class="nav docs-sidenav">
<li<%= sidebar_current("docs-home") %>>
<a href="/docs/providers/index.html">« Documentation Home</a>
</li>
<li<%= sidebar_current("docs-template-index") %>>
<a href="/docs/providers/template/index.html">Template Provider</a>
</li>
<li<%= sidebar_current(/^docs-template-datasource/) %>>
<a href="#">Data Sources</a>
<ul class="nav nav-visible">
<li<%= sidebar_current("docs-template-datasource-file") %>>
<a href="/docs/providers/template/d/file.html">template_file</a>
</li>
<li<%= sidebar_current("docs-template-datasource-cloudinit-config") %>>
<a href="/docs/providers/template/d/cloudinit_config.html">template_cloudinit_config</a>
</li>
</ul>
</li>
</ul>
</div>
<% end %>
<%= yield %>
<% end %>
| website/source/layouts/template.erb | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017113855574280024,
0.00016901735216379166,
0.00016608436999376863,
0.00016982915985863656,
0.000002141722006854252
] |
{
"id": 1,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"only word characters and hyphens allowed in %q: %q\",\n",
"\t\t\tk, value))\n",
"\t}\n",
"\n",
"\tif len(value) > 80 {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"only word characters, numbers, underscores, periods, and hyphens allowed in %q: %q\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 328
} | package awspolicy
import (
"encoding/json"
"reflect"
"strings"
"github.com/hashicorp/errwrap"
)
// PoliciesAreEquivalent tests for the structural equivalence of two
// AWS policies. It does not read into the semantics, other than treating
// single element string arrays as equivalent to a string without an
// array, as the AWS endpoints do.
//
// It will, however, detect reordering and ignore whitespace.
//
// Returns true if the policies are structurally equivalent, false
// otherwise. If either of the input strings is not valid JSON,
// false is returned along with an error.
func PoliciesAreEquivalent(policy1, policy2 string) (bool, error) {
policy1doc := &awsPolicyDocument{}
if err := json.Unmarshal([]byte(policy1), policy1doc); err != nil {
return false, errwrap.Wrapf("Error unmarshaling policy: {{err}}", err)
}
policy2doc := &awsPolicyDocument{}
if err := json.Unmarshal([]byte(policy2), policy2doc); err != nil {
return false, errwrap.Wrapf("Error unmarshaling policy: {{err}}", err)
}
return policy1doc.equals(policy2doc), nil
}
type awsPolicyDocument struct {
Version string `json:",omitempty"`
Id string `json:",omitempty"`
Statements []*awsPolicyStatement `json:"Statement"`
}
func (doc *awsPolicyDocument) equals(other *awsPolicyDocument) bool {
// Check the basic fields of the document
if doc.Version != other.Version {
return false
}
if doc.Id != other.Id {
return false
}
// If we have a different number of statements, we are very unlikely
// to have them be equivalent.
if len(doc.Statements) != len(other.Statements) {
return false
}
// If we have the same number of statements in the policy, does
// each statement in the doc have a corresponding statement in
// other which is equal? If no, policies are not equal, if yes,
// then they may be.
for _, ours := range doc.Statements {
found := false
for _, theirs := range other.Statements {
if ours.equals(theirs) {
found = true
}
}
if !found {
return false
}
}
// Now we need to repeat this process the other way around to
// ensure we don't have any matching errors.
for _, theirs := range other.Statements {
found := false
for _, ours := range doc.Statements {
if theirs.equals(ours) {
found = true
}
}
if !found {
return false
}
}
return true
}
type awsPolicyStatement struct {
Sid string `json:",omitempty"`
Effect string `json:",omitempty"`
Actions interface{} `json:"Action,omitempty"`
NotActions interface{} `json:"NotAction,omitempty"`
Resources interface{} `json:"Resource,omitempty"`
NotResources interface{} `json:"NotResource,omitempty"`
Principals interface{} `json:"Principal,omitempty"`
NotPrincipals interface{} `json:"NotPrincipal,omitempty"`
Conditions map[string]map[string]interface{} `json:"Condition,omitempty"`
}
func (statement *awsPolicyStatement) equals(other *awsPolicyStatement) bool {
if statement.Sid != other.Sid {
return false
}
if strings.ToLower(statement.Effect) != strings.ToLower(other.Effect) {
return false
}
ourActions := newAWSStringSet(statement.Actions)
theirActions := newAWSStringSet(other.Actions)
if !ourActions.equals(theirActions) {
return false
}
ourNotActions := newAWSStringSet(statement.NotActions)
theirNotActions := newAWSStringSet(other.NotActions)
if !ourNotActions.equals(theirNotActions) {
return false
}
ourResources := newAWSStringSet(statement.Resources)
theirResources := newAWSStringSet(other.Resources)
if !ourResources.equals(theirResources) {
return false
}
ourNotResources := newAWSStringSet(statement.NotResources)
theirNotResources := newAWSStringSet(other.NotResources)
if !ourNotResources.equals(theirNotResources) {
return false
}
ourConditionsBlock := awsConditionsBlock(statement.Conditions)
theirConditionsBlock := awsConditionsBlock(other.Conditions)
if !ourConditionsBlock.Equals(theirConditionsBlock) {
return false
}
if statement.Principals != nil || other.Principals != nil {
stringPrincipalsEqual := stringPrincipalsEqual(statement.Principals, other.Principals)
mapPrincipalsEqual := mapPrincipalsEqual(statement.Principals, other.Principals)
if !(stringPrincipalsEqual || mapPrincipalsEqual) {
return false
}
}
if statement.NotPrincipals != nil || other.NotPrincipals != nil {
stringNotPrincipalsEqual := stringPrincipalsEqual(statement.NotPrincipals, other.NotPrincipals)
mapNotPrincipalsEqual := mapPrincipalsEqual(statement.NotPrincipals, other.NotPrincipals)
if !(stringNotPrincipalsEqual || mapNotPrincipalsEqual) {
return false
}
}
return true
}
func mapPrincipalsEqual(ours, theirs interface{}) bool {
ourPrincipalMap, ok := ours.(map[string]interface{})
if !ok {
return false
}
theirPrincipalMap, ok := theirs.(map[string]interface{})
if !ok {
return false
}
oursNormalized := make(map[string]awsStringSet)
for key, val := range ourPrincipalMap {
oursNormalized[key] = newAWSStringSet(val)
}
theirsNormalized := make(map[string]awsStringSet)
for key, val := range theirPrincipalMap {
theirsNormalized[key] = newAWSStringSet(val)
}
for key, ours := range oursNormalized {
theirs, ok := theirsNormalized[key]
if !ok {
return false
}
if !ours.equals(theirs) {
return false
}
}
for key, theirs := range theirsNormalized {
ours, ok := oursNormalized[key]
if !ok {
return false
}
if !theirs.equals(ours) {
return false
}
}
return true
}
func stringPrincipalsEqual(ours, theirs interface{}) bool {
ourPrincipal, oursIsString := ours.(string)
theirPrincipal, theirsIsString := theirs.(string)
if !(oursIsString && theirsIsString) {
return false
}
if ourPrincipal == theirPrincipal {
return true
}
return false
}
type awsConditionsBlock map[string]map[string]interface{}
func (conditions awsConditionsBlock) Equals(other awsConditionsBlock) bool {
if conditions == nil && other != nil || other == nil && conditions != nil {
return false
}
if len(conditions) != len(other) {
return false
}
oursNormalized := make(map[string]map[string]awsStringSet)
for key, condition := range conditions {
normalizedCondition := make(map[string]awsStringSet)
for innerKey, val := range condition {
normalizedCondition[innerKey] = newAWSStringSet(val)
}
oursNormalized[key] = normalizedCondition
}
theirsNormalized := make(map[string]map[string]awsStringSet)
for key, condition := range other {
normalizedCondition := make(map[string]awsStringSet)
for innerKey, val := range condition {
normalizedCondition[innerKey] = newAWSStringSet(val)
}
theirsNormalized[key] = normalizedCondition
}
for key, ours := range oursNormalized {
theirs, ok := theirsNormalized[key]
if !ok {
return false
}
for innerKey, oursInner := range ours {
theirsInner, ok := theirs[innerKey]
if !ok {
return false
}
if !oursInner.equals(theirsInner) {
return false
}
}
}
for key, theirs := range theirsNormalized {
ours, ok := oursNormalized[key]
if !ok {
return false
}
for innerKey, theirsInner := range theirs {
oursInner, ok := ours[innerKey]
if !ok {
return false
}
if !theirsInner.equals(oursInner) {
return false
}
}
}
return true
}
type awsStringSet []string
// newAWSStringSet constructs an awsStringSet from an interface{} - which
// may be nil, a single string, or []interface{} (each of which is a string).
// This corresponds with how structures come off the JSON unmarshaler
// without any custom encoding rules.
func newAWSStringSet(members interface{}) awsStringSet {
if members == nil {
return awsStringSet{}
}
if single, ok := members.(string); ok {
return awsStringSet{single}
}
if multiple, ok := members.([]interface{}); ok {
actions := make([]string, len(multiple))
for i, action := range multiple {
actions[i] = action.(string)
}
return awsStringSet(actions)
}
return nil
}
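// exampleStringSetShapes is an illustrative sketch, not part of the upstream
// library: it shows the three input shapes newAWSStringSet accepts, mirroring
// what encoding/json produces for untyped policy documents.
func exampleStringSetShapes() {
	_ = newAWSStringSet(nil)                                           // absent key -> empty set
	_ = newAWSStringSet("s3:GetObject")                                // scalar JSON string -> one-element set
	_ = newAWSStringSet([]interface{}{"s3:GetObject", "s3:PutObject"}) // JSON array -> multi-element set
}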
func (actions awsStringSet) equals(other awsStringSet) bool {
if len(actions) != len(other) {
return false
}
ourMap := map[string]struct{}{}
theirMap := map[string]struct{}{}
for _, action := range actions {
ourMap[action] = struct{}{}
}
for _, action := range other {
theirMap[action] = struct{}{}
}
return reflect.DeepEqual(ourMap, theirMap)
}
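// exampleStringSetEquality is an illustrative sketch, not part of the upstream
// library: equals compares set membership rather than order, so reordered
// Action lists are equivalent.
func exampleStringSetEquality() {
	a := newAWSStringSet([]interface{}{"s3:GetObject", "s3:PutObject"})
	b := newAWSStringSet([]interface{}{"s3:PutObject", "s3:GetObject"})
	_ = a.equals(b) // true: same members, different order
}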
| vendor/github.com/jen20/awspolicyequivalence/aws_policy_equivalence.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00038937904173508286,
0.00017671380192041397,
0.00016428035451099277,
0.00017082439444493502,
0.00003716968421940692
] |
{
"id": 1,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"only word characters and hyphens allowed in %q: %q\",\n",
"\t\t\tk, value))\n",
"\t}\n",
"\n",
"\tif len(value) > 80 {\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"only word characters, numbers, underscores, periods, and hyphens allowed in %q: %q\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 328
} | package scheduler
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"net/http"
)
// DayOfWeek enumerates the values for day of week.
type DayOfWeek string
const (
// Friday specifies the friday state for day of week.
Friday DayOfWeek = "Friday"
// Monday specifies the monday state for day of week.
Monday DayOfWeek = "Monday"
// Saturday specifies the saturday state for day of week.
Saturday DayOfWeek = "Saturday"
// Sunday specifies the sunday state for day of week.
Sunday DayOfWeek = "Sunday"
// Thursday specifies the thursday state for day of week.
Thursday DayOfWeek = "Thursday"
// Tuesday specifies the tuesday state for day of week.
Tuesday DayOfWeek = "Tuesday"
// Wednesday specifies the wednesday state for day of week.
Wednesday DayOfWeek = "Wednesday"
)
// HTTPAuthenticationType enumerates the values for http authentication type.
type HTTPAuthenticationType string
const (
// ActiveDirectoryOAuth specifies the active directory o auth state for
// http authentication type.
ActiveDirectoryOAuth HTTPAuthenticationType = "ActiveDirectoryOAuth"
// Basic specifies the basic state for http authentication type.
Basic HTTPAuthenticationType = "Basic"
// ClientCertificate specifies the client certificate state for http
// authentication type.
ClientCertificate HTTPAuthenticationType = "ClientCertificate"
// NotSpecified specifies the not specified state for http authentication
// type.
NotSpecified HTTPAuthenticationType = "NotSpecified"
)
// JobActionType enumerates the values for job action type.
type JobActionType string
const (
// HTTP specifies the http state for job action type.
HTTP JobActionType = "Http"
// HTTPS specifies the https state for job action type.
HTTPS JobActionType = "Https"
// ServiceBusQueue specifies the service bus queue state for job action
// type.
ServiceBusQueue JobActionType = "ServiceBusQueue"
// ServiceBusTopic specifies the service bus topic state for job action
// type.
ServiceBusTopic JobActionType = "ServiceBusTopic"
// StorageQueue specifies the storage queue state for job action type.
StorageQueue JobActionType = "StorageQueue"
)
// JobCollectionState enumerates the values for job collection state.
type JobCollectionState string
const (
// Deleted specifies the deleted state for job collection state.
Deleted JobCollectionState = "Deleted"
// Disabled specifies the disabled state for job collection state.
Disabled JobCollectionState = "Disabled"
// Enabled specifies the enabled state for job collection state.
Enabled JobCollectionState = "Enabled"
// Suspended specifies the suspended state for job collection state.
Suspended JobCollectionState = "Suspended"
)
// JobExecutionStatus enumerates the values for job execution status.
type JobExecutionStatus string
const (
// Completed specifies the completed state for job execution status.
Completed JobExecutionStatus = "Completed"
// Failed specifies the failed state for job execution status.
Failed JobExecutionStatus = "Failed"
// Postponed specifies the postponed state for job execution status.
Postponed JobExecutionStatus = "Postponed"
)
// JobHistoryActionName enumerates the values for job history action name.
type JobHistoryActionName string
const (
// ErrorAction specifies the error action state for job history action
// name.
ErrorAction JobHistoryActionName = "ErrorAction"
// MainAction specifies the main action state for job history action name.
MainAction JobHistoryActionName = "MainAction"
)
// JobScheduleDay enumerates the values for job schedule day.
type JobScheduleDay string
const (
// JobScheduleDayFriday specifies the job schedule day friday state for
// job schedule day.
JobScheduleDayFriday JobScheduleDay = "Friday"
// JobScheduleDayMonday specifies the job schedule day monday state for
// job schedule day.
JobScheduleDayMonday JobScheduleDay = "Monday"
// JobScheduleDaySaturday specifies the job schedule day saturday state
// for job schedule day.
JobScheduleDaySaturday JobScheduleDay = "Saturday"
// JobScheduleDaySunday specifies the job schedule day sunday state for
// job schedule day.
JobScheduleDaySunday JobScheduleDay = "Sunday"
// JobScheduleDayThursday specifies the job schedule day thursday state
// for job schedule day.
JobScheduleDayThursday JobScheduleDay = "Thursday"
// JobScheduleDayTuesday specifies the job schedule day tuesday state for
// job schedule day.
JobScheduleDayTuesday JobScheduleDay = "Tuesday"
// JobScheduleDayWednesday specifies the job schedule day wednesday state
// for job schedule day.
JobScheduleDayWednesday JobScheduleDay = "Wednesday"
)
// JobState enumerates the values for job state.
type JobState string
const (
// JobStateCompleted specifies the job state completed state for job state.
JobStateCompleted JobState = "Completed"
// JobStateDisabled specifies the job state disabled state for job state.
JobStateDisabled JobState = "Disabled"
// JobStateEnabled specifies the job state enabled state for job state.
JobStateEnabled JobState = "Enabled"
// JobStateFaulted specifies the job state faulted state for job state.
JobStateFaulted JobState = "Faulted"
)
// RecurrenceFrequency enumerates the values for recurrence frequency.
type RecurrenceFrequency string
const (
// Day specifies the day state for recurrence frequency.
Day RecurrenceFrequency = "Day"
// Hour specifies the hour state for recurrence frequency.
Hour RecurrenceFrequency = "Hour"
// Minute specifies the minute state for recurrence frequency.
Minute RecurrenceFrequency = "Minute"
// Month specifies the month state for recurrence frequency.
Month RecurrenceFrequency = "Month"
// Week specifies the week state for recurrence frequency.
Week RecurrenceFrequency = "Week"
)
// RetryType enumerates the values for retry type.
type RetryType string
const (
// Fixed specifies the fixed state for retry type.
Fixed RetryType = "Fixed"
// None specifies the none state for retry type.
None RetryType = "None"
)
// ServiceBusAuthenticationType enumerates the values for service bus
// authentication type.
type ServiceBusAuthenticationType string
const (
// ServiceBusAuthenticationTypeNotSpecified specifies the service bus
// authentication type not specified state for service bus authentication
// type.
ServiceBusAuthenticationTypeNotSpecified ServiceBusAuthenticationType = "NotSpecified"
// ServiceBusAuthenticationTypeSharedAccessKey specifies the service bus
// authentication type shared access key state for service bus
// authentication type.
ServiceBusAuthenticationTypeSharedAccessKey ServiceBusAuthenticationType = "SharedAccessKey"
)
// ServiceBusTransportType enumerates the values for service bus transport
// type.
type ServiceBusTransportType string
const (
// ServiceBusTransportTypeAMQP specifies the service bus transport type
// amqp state for service bus transport type.
ServiceBusTransportTypeAMQP ServiceBusTransportType = "AMQP"
// ServiceBusTransportTypeNetMessaging specifies the service bus transport
// type net messaging state for service bus transport type.
ServiceBusTransportTypeNetMessaging ServiceBusTransportType = "NetMessaging"
// ServiceBusTransportTypeNotSpecified specifies the service bus transport
// type not specified state for service bus transport type.
ServiceBusTransportTypeNotSpecified ServiceBusTransportType = "NotSpecified"
)
// SkuDefinition enumerates the values for sku definition.
type SkuDefinition string
const (
// Free specifies the free state for sku definition.
Free SkuDefinition = "Free"
// P10Premium specifies the p10 premium state for sku definition.
P10Premium SkuDefinition = "P10Premium"
// P20Premium specifies the p20 premium state for sku definition.
P20Premium SkuDefinition = "P20Premium"
// Standard specifies the standard state for sku definition.
Standard SkuDefinition = "Standard"
)
// BasicAuthentication is
type BasicAuthentication struct {
Type HTTPAuthenticationType `json:"type,omitempty"`
Username *string `json:"username,omitempty"`
Password *string `json:"password,omitempty"`
}
// ClientCertAuthentication is
type ClientCertAuthentication struct {
Type HTTPAuthenticationType `json:"type,omitempty"`
Password *string `json:"password,omitempty"`
Pfx *string `json:"pfx,omitempty"`
CertificateThumbprint *string `json:"certificateThumbprint,omitempty"`
CertificateExpirationDate *date.Time `json:"certificateExpirationDate,omitempty"`
CertificateSubjectName *string `json:"certificateSubjectName,omitempty"`
}
// HTTPAuthentication is
type HTTPAuthentication struct {
Type HTTPAuthenticationType `json:"type,omitempty"`
}
// HTTPRequest is
type HTTPRequest struct {
Authentication *HTTPAuthentication `json:"authentication,omitempty"`
URI *string `json:"uri,omitempty"`
Method *string `json:"method,omitempty"`
Body *string `json:"body,omitempty"`
Headers *map[string]*string `json:"headers,omitempty"`
}
// JobAction is
type JobAction struct {
Type JobActionType `json:"type,omitempty"`
Request *HTTPRequest `json:"request,omitempty"`
QueueMessage *StorageQueueMessage `json:"queueMessage,omitempty"`
ServiceBusQueueMessage *ServiceBusQueueMessage `json:"serviceBusQueueMessage,omitempty"`
ServiceBusTopicMessage *ServiceBusTopicMessage `json:"serviceBusTopicMessage,omitempty"`
RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
ErrorAction *JobErrorAction `json:"errorAction,omitempty"`
}
// JobCollectionDefinition is
type JobCollectionDefinition struct {
autorest.Response `json:"-"`
ID *string `json:"id,omitempty"`
Type *string `json:"type,omitempty"`
Name *string `json:"name,omitempty"`
Location *string `json:"location,omitempty"`
Tags *map[string]*string `json:"tags,omitempty"`
Properties *JobCollectionProperties `json:"properties,omitempty"`
}
// JobCollectionListResult is
type JobCollectionListResult struct {
autorest.Response `json:"-"`
Value *[]JobCollectionDefinition `json:"value,omitempty"`
NextLink *string `json:"nextLink,omitempty"`
}
// JobCollectionListResultPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client JobCollectionListResult) JobCollectionListResultPreparer() (*http.Request, error) {
if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(client.NextLink)))
}
// JobCollectionProperties is
type JobCollectionProperties struct {
Sku *Sku `json:"sku,omitempty"`
State JobCollectionState `json:"state,omitempty"`
Quota *JobCollectionQuota `json:"quota,omitempty"`
}
// JobCollectionQuota is
type JobCollectionQuota struct {
MaxJobCount *int32 `json:"maxJobCount,omitempty"`
MaxJobOccurrence *int32 `json:"maxJobOccurrence,omitempty"`
MaxRecurrence *JobMaxRecurrence `json:"maxRecurrence,omitempty"`
}
// JobDefinition is
type JobDefinition struct {
autorest.Response `json:"-"`
ID *string `json:"id,omitempty"`
Type *string `json:"type,omitempty"`
Name *string `json:"name,omitempty"`
Properties *JobProperties `json:"properties,omitempty"`
}
// JobErrorAction is
type JobErrorAction struct {
Type JobActionType `json:"type,omitempty"`
Request *HTTPRequest `json:"request,omitempty"`
QueueMessage *StorageQueueMessage `json:"queueMessage,omitempty"`
ServiceBusQueueMessage *ServiceBusQueueMessage `json:"serviceBusQueueMessage,omitempty"`
ServiceBusTopicMessage *ServiceBusTopicMessage `json:"serviceBusTopicMessage,omitempty"`
RetryPolicy *RetryPolicy `json:"retryPolicy,omitempty"`
}
// JobHistoryDefinition is
type JobHistoryDefinition struct {
ID *string `json:"id,omitempty"`
Type *string `json:"type,omitempty"`
Name *string `json:"name,omitempty"`
Properties *JobHistoryDefinitionProperties `json:"properties,omitempty"`
}
// JobHistoryDefinitionProperties is
type JobHistoryDefinitionProperties struct {
StartTime *date.Time `json:"startTime,omitempty"`
EndTime *date.Time `json:"endTime,omitempty"`
ExpectedExecutionTime *date.Time `json:"expectedExecutionTime,omitempty"`
ActionName JobHistoryActionName `json:"actionName,omitempty"`
Status JobExecutionStatus `json:"status,omitempty"`
Message *string `json:"message,omitempty"`
RetryCount *int32 `json:"retryCount,omitempty"`
RepeatCount *int32 `json:"repeatCount,omitempty"`
}
// JobHistoryFilter is
type JobHistoryFilter struct {
Status JobExecutionStatus `json:"status,omitempty"`
}
// JobHistoryListResult is
type JobHistoryListResult struct {
autorest.Response `json:"-"`
Value *[]JobHistoryDefinition `json:"value,omitempty"`
NextLink *string `json:"nextLink,omitempty"`
}
// JobHistoryListResultPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client JobHistoryListResult) JobHistoryListResultPreparer() (*http.Request, error) {
if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(client.NextLink)))
}
// JobListResult is
type JobListResult struct {
autorest.Response `json:"-"`
Value *[]JobDefinition `json:"value,omitempty"`
NextLink *string `json:"nextLink,omitempty"`
}
// JobListResultPreparer prepares a request to retrieve the next set of results. It returns
// nil if no more results exist.
func (client JobListResult) JobListResultPreparer() (*http.Request, error) {
if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
return nil, nil
}
return autorest.Prepare(&http.Request{},
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(client.NextLink)))
}
// JobMaxRecurrence is
type JobMaxRecurrence struct {
Frequency RecurrenceFrequency `json:"frequency,omitempty"`
Interval *int32 `json:"interval,omitempty"`
}
// JobProperties is
type JobProperties struct {
StartTime *date.Time `json:"startTime,omitempty"`
Action *JobAction `json:"action,omitempty"`
Recurrence *JobRecurrence `json:"recurrence,omitempty"`
State JobState `json:"state,omitempty"`
Status *JobStatus `json:"status,omitempty"`
}
// JobRecurrence is
type JobRecurrence struct {
Frequency RecurrenceFrequency `json:"frequency,omitempty"`
Interval *int32 `json:"interval,omitempty"`
Count *int32 `json:"count,omitempty"`
EndTime *date.Time `json:"endTime,omitempty"`
Schedule *JobRecurrenceSchedule `json:"schedule,omitempty"`
}
// JobRecurrenceSchedule is
type JobRecurrenceSchedule struct {
WeekDays *[]DayOfWeek `json:"weekDays,omitempty"`
Hours *[]int32 `json:"hours,omitempty"`
Minutes *[]int32 `json:"minutes,omitempty"`
MonthDays *[]int32 `json:"monthDays,omitempty"`
MonthlyOccurrences *[]JobRecurrenceScheduleMonthlyOccurrence `json:"monthlyOccurrences,omitempty"`
}
// JobRecurrenceScheduleMonthlyOccurrence is
type JobRecurrenceScheduleMonthlyOccurrence struct {
Day JobScheduleDay `json:"day,omitempty"`
Occurrence *int32 `json:"Occurrence,omitempty"`
}
// JobStateFilter is
type JobStateFilter struct {
State JobState `json:"state,omitempty"`
}
// JobStatus is
type JobStatus struct {
ExecutionCount *int32 `json:"executionCount,omitempty"`
FailureCount *int32 `json:"failureCount,omitempty"`
FaultedCount *int32 `json:"faultedCount,omitempty"`
LastExecutionTime *date.Time `json:"lastExecutionTime,omitempty"`
NextExecutionTime *date.Time `json:"nextExecutionTime,omitempty"`
}
// OAuthAuthentication is
type OAuthAuthentication struct {
Type HTTPAuthenticationType `json:"type,omitempty"`
Secret *string `json:"secret,omitempty"`
Tenant *string `json:"tenant,omitempty"`
Audience *string `json:"audience,omitempty"`
ClientID *string `json:"clientId,omitempty"`
}
// RetryPolicy is
type RetryPolicy struct {
RetryType RetryType `json:"retryType,omitempty"`
RetryInterval *string `json:"retryInterval,omitempty"`
RetryCount *int32 `json:"retryCount,omitempty"`
}
// ServiceBusAuthentication is
type ServiceBusAuthentication struct {
SasKey *string `json:"sasKey,omitempty"`
SasKeyName *string `json:"sasKeyName,omitempty"`
Type ServiceBusAuthenticationType `json:"type,omitempty"`
}
// ServiceBusBrokeredMessageProperties is
type ServiceBusBrokeredMessageProperties struct {
ContentType *string `json:"contentType,omitempty"`
CorrelationID *string `json:"correlationId,omitempty"`
ForcePersistence *bool `json:"forcePersistence,omitempty"`
Label *string `json:"label,omitempty"`
MessageID *string `json:"messageId,omitempty"`
PartitionKey *string `json:"partitionKey,omitempty"`
ReplyTo *string `json:"replyTo,omitempty"`
ReplyToSessionID *string `json:"replyToSessionId,omitempty"`
ScheduledEnqueueTimeUtc *date.Time `json:"scheduledEnqueueTimeUtc,omitempty"`
SessionID *string `json:"sessionId,omitempty"`
TimeToLive *date.Time `json:"timeToLive,omitempty"`
To *string `json:"to,omitempty"`
ViaPartitionKey *string `json:"viaPartitionKey,omitempty"`
}
// ServiceBusMessage is
type ServiceBusMessage struct {
Authentication *ServiceBusAuthentication `json:"authentication,omitempty"`
BrokeredMessageProperties *ServiceBusBrokeredMessageProperties `json:"brokeredMessageProperties,omitempty"`
CustomMessageProperties *map[string]*string `json:"customMessageProperties,omitempty"`
Message *string `json:"message,omitempty"`
Namespace *string `json:"namespace,omitempty"`
TransportType ServiceBusTransportType `json:"transportType,omitempty"`
}
// ServiceBusQueueMessage is
type ServiceBusQueueMessage struct {
Authentication *ServiceBusAuthentication `json:"authentication,omitempty"`
BrokeredMessageProperties *ServiceBusBrokeredMessageProperties `json:"brokeredMessageProperties,omitempty"`
CustomMessageProperties *map[string]*string `json:"customMessageProperties,omitempty"`
Message *string `json:"message,omitempty"`
Namespace *string `json:"namespace,omitempty"`
TransportType ServiceBusTransportType `json:"transportType,omitempty"`
QueueName *string `json:"queueName,omitempty"`
}
// ServiceBusTopicMessage is
type ServiceBusTopicMessage struct {
Authentication *ServiceBusAuthentication `json:"authentication,omitempty"`
BrokeredMessageProperties *ServiceBusBrokeredMessageProperties `json:"brokeredMessageProperties,omitempty"`
CustomMessageProperties *map[string]*string `json:"customMessageProperties,omitempty"`
Message *string `json:"message,omitempty"`
Namespace *string `json:"namespace,omitempty"`
TransportType ServiceBusTransportType `json:"transportType,omitempty"`
TopicPath *string `json:"topicPath,omitempty"`
}
// Sku is
type Sku struct {
Name SkuDefinition `json:"name,omitempty"`
}
// StorageQueueMessage is
type StorageQueueMessage struct {
StorageAccount *string `json:"storageAccount,omitempty"`
QueueName *string `json:"queueName,omitempty"`
SasToken *string `json:"sasToken,omitempty"`
Message *string `json:"message,omitempty"`
}
| vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/models.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.0008761920034885406,
0.0001982738176593557,
0.00016370990488212556,
0.00017104933795053512,
0.00010060664499178529
] |
{
"id": 2,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q cannot be an empty string: %q\", k, value))\n",
"\t}\n",
"\tif !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`[a-zA-Z0-9_]$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 341
} | package azurerm
import (
"fmt"
"log"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/riviera/azure"
)
func resourceArmLoadBalancerRule() *schema.Resource {
return &schema.Resource{
Create: resourceArmLoadBalancerRuleCreate,
Read: resourceArmLoadBalancerRuleRead,
Update: resourceArmLoadBalancerRuleCreate,
Delete: resourceArmLoadBalancerRuleDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArmLoadBalancerRuleName,
},
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: azureRMNormalizeLocation,
},
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"loadbalancer_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"frontend_ip_configuration_name": {
Type: schema.TypeString,
Required: true,
},
"frontend_ip_configuration_id": {
Type: schema.TypeString,
Computed: true,
},
"backend_address_pool_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Required: true,
},
"frontend_port": {
Type: schema.TypeInt,
Required: true,
},
"backend_port": {
Type: schema.TypeInt,
Required: true,
},
"probe_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"enable_floating_ip": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout_in_minutes": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"load_distribution": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
_, _, exists = findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
}
newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer)
if err != nil {
		return errwrap.Wrapf("Error Expanding LoadBalancer Rule {{err}}", err)
}
lbRules := append(*loadBalancer.Properties.LoadBalancingRules, *newLbRule)
loadBalancer.Properties.LoadBalancingRules = &lbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
var rule_id string
for _, LoadBalancingRule := range *(*read.Properties).LoadBalancingRules {
if *LoadBalancingRule.Name == d.Get("name").(string) {
rule_id = *LoadBalancingRule.ID
}
}
if rule_id != "" {
d.SetId(rule_id)
} else {
return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id)
}
log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName)
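	// StateChangeConf polls loadbalancerStateRefreshFunc until the state leaves
	// the Pending set (Accepted/Updating) and reaches Succeeded, or the
	// 10-minute timeout elapses.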
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: []string{"Succeeded"},
Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err)
}
return resourceArmLoadBalancerRuleRead(d, meta)
}
func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error {
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
configs := *loadBalancer.Properties.LoadBalancingRules
for _, config := range configs {
if *config.Name == d.Get("name").(string) {
d.Set("name", config.Name)
d.Set("protocol", config.Properties.Protocol)
d.Set("frontend_port", config.Properties.FrontendPort)
d.Set("backend_port", config.Properties.BackendPort)
if config.Properties.EnableFloatingIP != nil {
d.Set("enable_floating_ip", config.Properties.EnableFloatingIP)
}
if config.Properties.IdleTimeoutInMinutes != nil {
d.Set("idle_timeout_in_minutes", config.Properties.IdleTimeoutInMinutes)
}
if config.Properties.FrontendIPConfiguration != nil {
d.Set("frontend_ip_configuration_id", config.Properties.FrontendIPConfiguration.ID)
}
if config.Properties.BackendAddressPool != nil {
d.Set("backend_address_pool_id", config.Properties.BackendAddressPool.ID)
}
if config.Properties.Probe != nil {
d.Set("probe_id", config.Properties.Probe.ID)
}
if config.Properties.LoadDistribution != "" {
d.Set("load_distribution", config.Properties.LoadDistribution)
}
}
}
return nil
}
func resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
return nil
}
_, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if !exists {
return nil
}
oldLbRules := *loadBalancer.Properties.LoadBalancingRules
newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...)
loadBalancer.Properties.LoadBalancingRules = &newLbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
return nil
}
func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) {
properties := network.LoadBalancingRulePropertiesFormat{
Protocol: network.TransportProtocol(d.Get("protocol").(string)),
FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))),
BackendPort: azure.Int32(int32(d.Get("backend_port").(int))),
EnableFloatingIP: azure.Bool(d.Get("enable_floating_ip").(bool)),
}
if v, ok := d.GetOk("idle_timeout_in_minutes"); ok {
properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int)))
}
if v := d.Get("load_distribution").(string); v != "" {
properties.LoadDistribution = network.LoadDistribution(v)
}
if v := d.Get("frontend_ip_configuration_name").(string); v != "" {
rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v)
if !exists {
return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v)
}
feip := network.SubResource{
ID: rule.ID,
}
properties.FrontendIPConfiguration = &feip
}
if v := d.Get("backend_address_pool_id").(string); v != "" {
beAP := network.SubResource{
ID: &v,
}
properties.BackendAddressPool = &beAP
}
if v := d.Get("probe_id").(string); v != "" {
pid := network.SubResource{
ID: &v,
}
properties.Probe = &pid
}
lbRule := network.LoadBalancingRule{
Name: azure.String(d.Get("name").(string)),
Properties: &properties,
}
return &lbRule, nil
}
func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only word characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 80 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 80 characters: %q", k, value))
}
if len(value) == 0 {
errors = append(errors, fmt.Errorf(
"%q cannot be an empty string: %q", k, value))
}
if !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must end with a word character: %q", k, value))
}
if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must start with a word character: %q", k, value))
}
return
}
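// exampleRuleNameValidation is an illustrative sketch, not part of the
// original provider source: names containing digits, such as "rule-1", fail
// both the `^[a-zA-Z._-]+$` whitelist and the trailing `[a-zA-Z]$` check
// above; the surrounding dataset hunks record the widened patterns that
// accept them.
func exampleRuleNameValidation() {
	_, errs := validateArmLoadBalancerRuleName("rule-1", "name")    // rejected: contains and ends with a digit
	_, errs2 := validateArmLoadBalancerRuleName("web-rule", "name") // accepted by all five checks
	_, _ = errs, errs2
}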
| builtin/providers/azurerm/resource_arm_loadbalancer_rule.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.9969063401222229,
0.05582010746002197,
0.0001607061713002622,
0.00017132754146587104,
0.22809256613254547
] |
{
"id": 2,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q cannot be an empty string: %q\", k, value))\n",
"\t}\n",
"\tif !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`[a-zA-Z0-9_]$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 341
} | ---
layout: "openstack"
page_title: "OpenStack: openstack_networking_floatingip_v2"
sidebar_current: "docs-openstack-resource-networking-floatingip-v2"
description: |-
Manages a V2 floating IP resource within OpenStack Neutron (networking).
---
# openstack\_networking\_floatingip_v2
Manages a V2 floating IP resource within OpenStack Neutron (networking)
that can be used for load balancers.
These are similar to Nova (compute) floating IP resources,
but only compute floating IPs can be used with compute instances.
## Example Usage
```
resource "openstack_networking_floatingip_v2" "floatip_1" {
pool = "public"
}
```
## Argument Reference
The following arguments are supported:
* `region` - (Required) The region in which to obtain the V2 Networking client.
A Networking client is needed to create a floating IP that can be used with
another networking resource, such as a load balancer. If omitted, the
`OS_REGION_NAME` environment variable is used. Changing this creates a new
floating IP (which may or may not have a different address).
* `pool` - (Required) The name of the pool from which to obtain the floating
IP. Changing this creates a new floating IP.
* `port_id` - (Optional) ID of an existing port with at least one IP address to
  associate with this floating IP (see the example below).
* `tenant_id` - (Optional) The target tenant ID in which to allocate the floating
  IP. If you specify this together with a `port_id`, make sure the target port
  belongs to the same tenant. Changing this creates a new floating IP (which
  may or may not have a different address).
* `fixed_ip` - (Optional) Fixed IP of the port to associate with this floating IP.
  Required if the port has multiple fixed IPs.
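A minimal sketch of associating the floating IP with an existing port (the
port resource and the network reference below are illustrative assumptions,
not part of the original example):
```
resource "openstack_networking_port_v2" "port_1" {
  name       = "port_1"
  network_id = "${openstack_networking_network_v2.network_1.id}"
}
resource "openstack_networking_floatingip_v2" "floatip_1" {
  pool    = "public"
  port_id = "${openstack_networking_port_v2.port_1.id}"
}
```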
## Attributes Reference
The following attributes are exported:
* `region` - See Argument Reference above.
* `pool` - See Argument Reference above.
* `address` - The actual floating IP address itself.
* `port_id` - ID of associated port.
* `tenant_id` - the ID of the tenant in which to create the floating IP.
* `fixed_ip` - The fixed IP which the floating IP maps to.
## Import
Floating IPs can be imported using the `id`, e.g.
```
$ terraform import openstack_networking_floatingip_v2.floatip_1 2c7f39f3-702b-48d1-940c-b50384177ee1
```
| website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017165204917546362,
0.00016626296564936638,
0.00016240977856796235,
0.00016459205653518438,
0.0000034639060686458834
] |
{
"id": 2,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q cannot be an empty string: %q\", k, value))\n",
"\t}\n",
"\tif !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`[a-zA-Z0-9_]$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 341
} | // Package unidecode implements a unicode transliterator
// which replaces non-ASCII characters with their ASCII
// approximations.
package unidecode
//go:generate go run make_table.go
import (
"sync"
"unicode"
)
const pooledCapacity = 64
var (
slicePool sync.Pool
decodingOnce sync.Once
)
// Unidecode implements a unicode transliterator, which
// replaces non-ASCII characters with their ASCII
// counterparts.
// Given a Unicode-encoded string, it returns
// another string with non-ASCII characters replaced
// with their closest ASCII counterparts,
// e.g. Unidecode("áéíóú") => "aeiou".
func Unidecode(s string) string {
decodingOnce.Do(decodeTransliterations)
l := len(s)
var r []rune
if l > pooledCapacity {
r = make([]rune, 0, len(s))
} else {
if x := slicePool.Get(); x != nil {
r = x.([]rune)[:0]
} else {
r = make([]rune, 0, pooledCapacity)
}
}
for _, c := range s {
if c <= unicode.MaxASCII {
r = append(r, c)
continue
}
if c > unicode.MaxRune || c > transCount {
/* Ignore reserved chars */
continue
}
if d := transliterations[c]; d != nil {
r = append(r, d...)
}
}
res := string(r)
if l <= pooledCapacity {
slicePool.Put(r)
}
return res
}
| vendor/github.com/rainycape/unidecode/unidecode.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017561022832524031,
0.00017100996046792716,
0.0001683282171143219,
0.00017070406465791166,
0.000002402077825536253
] |
{
"id": 2,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q cannot be an empty string: %q\", k, value))\n",
"\t}\n",
"\tif !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`[a-zA-Z0-9_]$`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 341
} | ---
layout: "azurerm"
page_title: "Azure Resource Manager: azurerm_cdn_profile"
sidebar_current: "docs-azurerm-resource-cdn-profile"
description: |-
  Creates a CDN Profile, a container for a collection of CDN Endpoints.
---
# azurerm\_cdn\_profile
Creates a CDN Profile, a container for a collection of CDN Endpoints.
## Example Usage
```
resource "azurerm_resource_group" "test" {
name = "resourceGroup1"
location = "West US"
}
resource "azurerm_cdn_profile" "test" {
name = "acceptanceTestCdnProfile1"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
sku = "Standard_Verizon"
tags {
environment = "Production"
cost_center = "MSFT"
}
}
```
## Argument Reference
The following arguments are supported:
* `name` - (Required) Specifies the name of the CDN Profile. Changing this forces a
new resource to be created.
* `resource_group_name` - (Required) The name of the resource group in which to
create the CDN Profile.
* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
* `sku` - (Required) The pricing related information of current CDN profile. Accepted values are `Standard_Verizon`, `Standard_Akamai` or `Premium_Verizon`.
* `tags` - (Optional) A mapping of tags to assign to the resource.
## Attributes Reference
The following attributes are exported:
* `id` - The CDN Profile ID. | website/source/docs/providers/azurerm/r/cdn_profile.html.markdown | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017325264343526214,
0.0001671961072133854,
0.00016426875663455576,
0.00016640669491607696,
0.0000029608552267745836
] |
{
"id": 3,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must end with a word character: %q\", k, value))\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must end with a word character, number, or underscore: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 343
} | package azurerm
import (
"fmt"
"log"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/riviera/azure"
)
func resourceArmLoadBalancerRule() *schema.Resource {
return &schema.Resource{
Create: resourceArmLoadBalancerRuleCreate,
Read: resourceArmLoadBalancerRuleRead,
Update: resourceArmLoadBalancerRuleCreate,
Delete: resourceArmLoadBalancerRuleDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArmLoadBalancerRuleName,
},
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: azureRMNormalizeLocation,
},
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"loadbalancer_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"frontend_ip_configuration_name": {
Type: schema.TypeString,
Required: true,
},
"frontend_ip_configuration_id": {
Type: schema.TypeString,
Computed: true,
},
"backend_address_pool_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Required: true,
},
"frontend_port": {
Type: schema.TypeInt,
Required: true,
},
"backend_port": {
Type: schema.TypeInt,
Required: true,
},
"probe_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"enable_floating_ip": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout_in_minutes": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"load_distribution": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
_, _, exists = findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
}
newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer)
if err != nil {
		return errwrap.Wrapf("Error Expanding LoadBalancer Rule {{err}}", err)
}
lbRules := append(*loadBalancer.Properties.LoadBalancingRules, *newLbRule)
loadBalancer.Properties.LoadBalancingRules = &lbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
var rule_id string
for _, LoadBalancingRule := range *(*read.Properties).LoadBalancingRules {
if *LoadBalancingRule.Name == d.Get("name").(string) {
rule_id = *LoadBalancingRule.ID
}
}
if rule_id != "" {
d.SetId(rule_id)
} else {
return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id)
}
log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName)
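	// StateChangeConf polls loadbalancerStateRefreshFunc until the state leaves
	// the Pending set (Accepted/Updating) and reaches Succeeded, or the
	// 10-minute timeout elapses.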
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: []string{"Succeeded"},
Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err)
}
return resourceArmLoadBalancerRuleRead(d, meta)
}
func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error {
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
configs := *loadBalancer.Properties.LoadBalancingRules
for _, config := range configs {
if *config.Name == d.Get("name").(string) {
d.Set("name", config.Name)
d.Set("protocol", config.Properties.Protocol)
d.Set("frontend_port", config.Properties.FrontendPort)
d.Set("backend_port", config.Properties.BackendPort)
if config.Properties.EnableFloatingIP != nil {
d.Set("enable_floating_ip", config.Properties.EnableFloatingIP)
}
if config.Properties.IdleTimeoutInMinutes != nil {
d.Set("idle_timeout_in_minutes", config.Properties.IdleTimeoutInMinutes)
}
if config.Properties.FrontendIPConfiguration != nil {
d.Set("frontend_ip_configuration_id", config.Properties.FrontendIPConfiguration.ID)
}
if config.Properties.BackendAddressPool != nil {
d.Set("backend_address_pool_id", config.Properties.BackendAddressPool.ID)
}
if config.Properties.Probe != nil {
d.Set("probe_id", config.Properties.Probe.ID)
}
if config.Properties.LoadDistribution != "" {
d.Set("load_distribution", config.Properties.LoadDistribution)
}
}
}
return nil
}
func resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
return nil
}
_, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if !exists {
return nil
}
oldLbRules := *loadBalancer.Properties.LoadBalancingRules
newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...)
loadBalancer.Properties.LoadBalancingRules = &newLbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
return nil
}
func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) {
properties := network.LoadBalancingRulePropertiesFormat{
Protocol: network.TransportProtocol(d.Get("protocol").(string)),
FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))),
BackendPort: azure.Int32(int32(d.Get("backend_port").(int))),
EnableFloatingIP: azure.Bool(d.Get("enable_floating_ip").(bool)),
}
if v, ok := d.GetOk("idle_timeout_in_minutes"); ok {
properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int)))
}
if v := d.Get("load_distribution").(string); v != "" {
properties.LoadDistribution = network.LoadDistribution(v)
}
if v := d.Get("frontend_ip_configuration_name").(string); v != "" {
rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v)
if !exists {
return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v)
}
feip := network.SubResource{
ID: rule.ID,
}
properties.FrontendIPConfiguration = &feip
}
if v := d.Get("backend_address_pool_id").(string); v != "" {
beAP := network.SubResource{
ID: &v,
}
properties.BackendAddressPool = &beAP
}
if v := d.Get("probe_id").(string); v != "" {
pid := network.SubResource{
ID: &v,
}
properties.Probe = &pid
}
lbRule := network.LoadBalancingRule{
Name: azure.String(d.Get("name").(string)),
Properties: &properties,
}
return &lbRule, nil
}
func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only word characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 80 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 80 characters: %q", k, value))
}
if len(value) == 0 {
errors = append(errors, fmt.Errorf(
"%q cannot be an empty string: %q", k, value))
}
if !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must end with a word character: %q", k, value))
}
if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must start with a word character: %q", k, value))
}
return
}
| builtin/providers/azurerm/resource_arm_loadbalancer_rule.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.9975323677062988,
0.08251527696847916,
0.00016467996465507895,
0.00016901551862247288,
0.27305543422698975
] |
{
"id": 3,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must end with a word character: %q\", k, value))\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must end with a word character, number, or underscore: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 343
} | package aws
import (
"bytes"
"fmt"
"log"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/rds"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsDbSecurityGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsDbSecurityGroupCreate,
Read: resourceAwsDbSecurityGroupRead,
Update: resourceAwsDbSecurityGroupUpdate,
Delete: resourceAwsDbSecurityGroupDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"arn": &schema.Schema{
Type: schema.TypeString,
Computed: true,
},
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"description": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "Managed by Terraform",
},
"ingress": &schema.Schema{
Type: schema.TypeSet,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cidr": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"security_group_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"security_group_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"security_group_owner_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
},
Set: resourceAwsDbSecurityGroupIngressHash,
},
"tags": tagsSchema(),
},
}
}
func resourceAwsDbSecurityGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{}))
var err error
var errs []error
opts := rds.CreateDBSecurityGroupInput{
DBSecurityGroupName: aws.String(d.Get("name").(string)),
DBSecurityGroupDescription: aws.String(d.Get("description").(string)),
Tags: tags,
}
log.Printf("[DEBUG] DB Security Group create configuration: %#v", opts)
_, err = conn.CreateDBSecurityGroup(&opts)
if err != nil {
return fmt.Errorf("Error creating DB Security Group: %s", err)
}
d.SetId(d.Get("name").(string))
log.Printf("[INFO] DB Security Group ID: %s", d.Id())
sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta)
if err != nil {
return err
}
ingresses := d.Get("ingress").(*schema.Set)
for _, ing := range ingresses.List() {
err := resourceAwsDbSecurityGroupAuthorizeRule(ing, *sg.DBSecurityGroupName, conn)
if err != nil {
errs = append(errs, err)
}
}
if len(errs) > 0 {
return &multierror.Error{Errors: errs}
}
log.Println(
"[INFO] Waiting for Ingress Authorizations to be authorized")
stateConf := &resource.StateChangeConf{
Pending: []string{"authorizing"},
Target: []string{"authorized"},
Refresh: resourceAwsDbSecurityGroupStateRefreshFunc(d, meta),
Timeout: 10 * time.Minute,
}
// Wait, catching any errors
_, err = stateConf.WaitForState()
if err != nil {
return err
}
return resourceAwsDbSecurityGroupRead(d, meta)
}
func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) error {
sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta)
if err != nil {
return err
}
d.Set("name", *sg.DBSecurityGroupName)
d.Set("description", *sg.DBSecurityGroupDescription)
// Create an empty schema.Set to hold all ingress rules
rules := &schema.Set{
F: resourceAwsDbSecurityGroupIngressHash,
}
for _, v := range sg.IPRanges {
rule := map[string]interface{}{"cidr": *v.CIDRIP}
rules.Add(rule)
}
for _, g := range sg.EC2SecurityGroups {
rule := map[string]interface{}{}
if g.EC2SecurityGroupId != nil {
rule["security_group_id"] = *g.EC2SecurityGroupId
}
if g.EC2SecurityGroupName != nil {
rule["security_group_name"] = *g.EC2SecurityGroupName
}
if g.EC2SecurityGroupOwnerId != nil {
rule["security_group_owner_id"] = *g.EC2SecurityGroupOwnerId
}
rules.Add(rule)
}
d.Set("ingress", rules)
conn := meta.(*AWSClient).rdsconn
arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region)
if err != nil {
name := "<empty>"
if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" {
name = *sg.DBSecurityGroupName
}
log.Printf("[DEBUG] Error building ARN for DB Security Group, not setting Tags for DB Security Group %s", name)
} else {
d.Set("arn", arn)
resp, err := conn.ListTagsForResource(&rds.ListTagsForResourceInput{
ResourceName: aws.String(arn),
})
if err != nil {
log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
}
var dt []*rds.Tag
if len(resp.TagList) > 0 {
dt = resp.TagList
}
d.Set("tags", tagsToMapRDS(dt))
}
return nil
}
func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
d.Partial(true)
if arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).partition, meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil {
if err := setTagsRDS(conn, d, arn); err != nil {
return err
} else {
d.SetPartial("tags")
}
}
if d.HasChange("ingress") {
sg, err := resourceAwsDbSecurityGroupRetrieve(d, meta)
if err != nil {
return err
}
oi, ni := d.GetChange("ingress")
if oi == nil {
oi = new(schema.Set)
}
if ni == nil {
ni = new(schema.Set)
}
ois := oi.(*schema.Set)
nis := ni.(*schema.Set)
removeIngress := ois.Difference(nis).List()
newIngress := nis.Difference(ois).List()
// DELETE old Ingress rules
for _, ing := range removeIngress {
err := resourceAwsDbSecurityGroupRevokeRule(ing, *sg.DBSecurityGroupName, conn)
if err != nil {
return err
}
}
// ADD new/updated Ingress rules
for _, ing := range newIngress {
err := resourceAwsDbSecurityGroupAuthorizeRule(ing, *sg.DBSecurityGroupName, conn)
if err != nil {
return err
}
}
}
d.Partial(false)
return resourceAwsDbSecurityGroupRead(d, meta)
}
func resourceAwsDbSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).rdsconn
log.Printf("[DEBUG] DB Security Group destroy: %v", d.Id())
opts := rds.DeleteDBSecurityGroupInput{DBSecurityGroupName: aws.String(d.Id())}
log.Printf("[DEBUG] DB Security Group destroy configuration: %v", opts)
_, err := conn.DeleteDBSecurityGroup(&opts)
if err != nil {
newerr, ok := err.(awserr.Error)
if ok && newerr.Code() == "InvalidDBSecurityGroup.NotFound" {
return nil
}
return err
}
return nil
}
func resourceAwsDbSecurityGroupRetrieve(d *schema.ResourceData, meta interface{}) (*rds.DBSecurityGroup, error) {
conn := meta.(*AWSClient).rdsconn
opts := rds.DescribeDBSecurityGroupsInput{
DBSecurityGroupName: aws.String(d.Id()),
}
log.Printf("[DEBUG] DB Security Group describe configuration: %#v", opts)
resp, err := conn.DescribeDBSecurityGroups(&opts)
if err != nil {
return nil, fmt.Errorf("Error retrieving DB Security Groups: %s", err)
}
if len(resp.DBSecurityGroups) != 1 ||
*resp.DBSecurityGroups[0].DBSecurityGroupName != d.Id() {
return nil, fmt.Errorf("Unable to find DB Security Group: %#v", resp.DBSecurityGroups)
}
return resp.DBSecurityGroups[0], nil
}
// Authorizes the ingress rule on the db security group
func resourceAwsDbSecurityGroupAuthorizeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.RDS) error {
ing := ingress.(map[string]interface{})
opts := rds.AuthorizeDBSecurityGroupIngressInput{
DBSecurityGroupName: aws.String(dbSecurityGroupName),
}
if attr, ok := ing["cidr"]; ok && attr != "" {
opts.CIDRIP = aws.String(attr.(string))
}
if attr, ok := ing["security_group_name"]; ok && attr != "" {
opts.EC2SecurityGroupName = aws.String(attr.(string))
}
if attr, ok := ing["security_group_id"]; ok && attr != "" {
opts.EC2SecurityGroupId = aws.String(attr.(string))
}
if attr, ok := ing["security_group_owner_id"]; ok && attr != "" {
opts.EC2SecurityGroupOwnerId = aws.String(attr.(string))
}
log.Printf("[DEBUG] Authorize ingress rule configuration: %#v", opts)
_, err := conn.AuthorizeDBSecurityGroupIngress(&opts)
if err != nil {
return fmt.Errorf("Error authorizing security group ingress: %s", err)
}
return nil
}
// Revokes the ingress rule on the db security group
func resourceAwsDbSecurityGroupRevokeRule(ingress interface{}, dbSecurityGroupName string, conn *rds.RDS) error {
ing := ingress.(map[string]interface{})
opts := rds.RevokeDBSecurityGroupIngressInput{
DBSecurityGroupName: aws.String(dbSecurityGroupName),
}
if attr, ok := ing["cidr"]; ok && attr != "" {
opts.CIDRIP = aws.String(attr.(string))
}
if attr, ok := ing["security_group_name"]; ok && attr != "" {
opts.EC2SecurityGroupName = aws.String(attr.(string))
}
if attr, ok := ing["security_group_id"]; ok && attr != "" {
opts.EC2SecurityGroupId = aws.String(attr.(string))
}
if attr, ok := ing["security_group_owner_id"]; ok && attr != "" {
opts.EC2SecurityGroupOwnerId = aws.String(attr.(string))
}
log.Printf("[DEBUG] Revoking ingress rule configuration: %#v", opts)
_, err := conn.RevokeDBSecurityGroupIngress(&opts)
if err != nil {
return fmt.Errorf("Error revoking security group ingress: %s", err)
}
return nil
}
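// resourceAwsDbSecurityGroupIngressHash derives a stable hash from an ingress
// block's fields so Terraform can diff set membership between plans.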
func resourceAwsDbSecurityGroupIngressHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
if v, ok := m["cidr"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["security_group_name"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["security_group_id"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["security_group_owner_id"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
return hashcode.String(buf.String())
}
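// resourceAwsDbSecurityGroupStateRefreshFunc polls the security group until
// every EC2 security group and CIDR source reports an "authorized" status.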
func resourceAwsDbSecurityGroupStateRefreshFunc(
d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
v, err := resourceAwsDbSecurityGroupRetrieve(d, meta)
if err != nil {
log.Printf("Error on retrieving DB Security Group when waiting: %s", err)
return nil, "", err
}
statuses := make([]string, 0, len(v.EC2SecurityGroups)+len(v.IPRanges))
for _, ec2g := range v.EC2SecurityGroups {
statuses = append(statuses, *ec2g.Status)
}
for _, ips := range v.IPRanges {
statuses = append(statuses, *ips.Status)
}
for _, stat := range statuses {
// Not done
if stat != "authorized" {
return nil, "authorizing", nil
}
}
return v, "authorized", nil
}
}
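// buildRDSSecurityGroupARN assembles the ARN for a DB security group; the AWS
// partition and account ID must both be known.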
func buildRDSSecurityGroupARN(identifier, partition, accountid, region string) (string, error) {
if partition == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS partition")
}
if accountid == "" {
return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID")
}
arn := fmt.Sprintf("arn:%s:rds:%s:%s:secgrp:%s", partition, region, accountid, identifier)
return arn, nil
}
| builtin/providers/aws/resource_aws_db_security_group.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.14975403249263763,
0.0037037543952465057,
0.0001646298769628629,
0.00017183375894092023,
0.022277509793639183
] |
{
"id": 3,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must end with a word character: %q\", k, value))\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must end with a word character, number, or underscore: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 343
} | language: go
go:
- 1.1
- 1.2
- 1.3
install:
- go get github.com/motain/gocheck
- go get github.com/soniah/dnsmadeeasy
script:
- go test
| vendor/github.com/soniah/dnsmadeeasy/.travis.yml | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017763703363016248,
0.000173837281181477,
0.00017003752873279154,
0.000173837281181477,
0.0000037997524486854672
] |
{
"id": 3,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must end with a word character: %q\", k, value))\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must end with a word character, number, or underscore: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 343
} | // +build fixtures
package tenantnetworks
import (
"fmt"
"net/http"
"testing"
"time"
th "github.com/rackspace/gophercloud/testhelper"
"github.com/rackspace/gophercloud/testhelper/client"
)
// ListOutput is a sample response to a List call.
const ListOutput = `
{
"networks": [
{
"cidr": "10.0.0.0/29",
"id": "20c8acc0-f747-4d71-a389-46d078ebf047",
"label": "mynet_0"
},
{
"cidr": "10.0.0.10/29",
"id": "20c8acc0-f747-4d71-a389-46d078ebf000",
"label": "mynet_1"
}
]
}
`
// GetOutput is a sample response to a Get call.
const GetOutput = `
{
"network": {
"cidr": "10.0.0.10/29",
"id": "20c8acc0-f747-4d71-a389-46d078ebf000",
"label": "mynet_1"
}
}
`
// FirstNetwork is the first result in ListOutput.
var nilTime time.Time
var FirstNetwork = Network{
CIDR: "10.0.0.0/29",
ID: "20c8acc0-f747-4d71-a389-46d078ebf047",
Name: "mynet_0",
}
// SecondNetwork is the second result in ListOutput.
var SecondNetwork = Network{
CIDR: "10.0.0.10/29",
ID: "20c8acc0-f747-4d71-a389-46d078ebf000",
Name: "mynet_1",
}
// ExpectedNetworkSlice is the slice of results that should be parsed
// from ListOutput, in the expected order.
var ExpectedNetworkSlice = []Network{FirstNetwork, SecondNetwork}
// HandleListSuccessfully configures the test server to respond to a List request.
func HandleListSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/os-tenant-networks", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
w.Header().Add("Content-Type", "application/json")
fmt.Fprintf(w, ListOutput)
})
}
// HandleGetSuccessfully configures the test server to respond to a Get request
// for an existing network.
func HandleGetSuccessfully(t *testing.T) {
th.Mux.HandleFunc("/os-tenant-networks/20c8acc0-f747-4d71-a389-46d078ebf000", func(w http.ResponseWriter, r *http.Request) {
th.TestMethod(t, r, "GET")
th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
w.Header().Add("Content-Type", "application/json")
fmt.Fprintf(w, GetOutput)
})
}
| vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks/fixtures.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00018487320630811155,
0.00017058649973478168,
0.0001657850225456059,
0.00016919427434913814,
0.00000536319384991657
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\tif !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z0-9]`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 346
} | package azurerm
import (
"fmt"
"os"
"testing"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestResourceAzureRMLoadBalancerRuleNameLabel_validation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
}{
{
Value: "-word",
ErrCount: 1,
},
{
Value: "testing-",
ErrCount: 1,
},
{
Value: "test123test",
ErrCount: 1,
},
{
Value: acctest.RandStringFromCharSet(81, "abcdedfed"),
ErrCount: 1,
},
{
Value: "test.rule",
ErrCount: 0,
},
{
Value: "test_rule",
ErrCount: 0,
},
{
Value: "test-rule",
ErrCount: 0,
},
{
Value: "TestRule",
ErrCount: 0,
},
}
for _, tc := range cases {
_, errors := validateArmLoadBalancerRuleName(tc.Value, "azurerm_lb_rule")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected the Azure RM LoadBalancer Rule Name Label to trigger a validation error")
}
}
}
func TestAccAzureRMLoadBalancerRule_basic(t *testing.T) {
var lb network.LoadBalancer
ri := acctest.RandInt()
lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha))
subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
lbRule_id := fmt.Sprintf(
"/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/loadBalancingRules/%s",
subscriptionID, ri, ri, lbRuleName)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
resource.TestCheckResourceAttr(
"azurerm_lb_rule.test", "id", lbRule_id),
),
},
},
})
}
func TestAccAzureRMLoadBalancerRule_removal(t *testing.T) {
var lb network.LoadBalancer
ri := acctest.RandInt()
lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
),
},
{
Config: testAccAzureRMLoadBalancerRule_removal(ri),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
testCheckAzureRMLoadBalancerRuleNotExists(lbRuleName, &lb),
),
},
},
})
}
func testCheckAzureRMLoadBalancerRuleExists(lbRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, _, exists := findLoadBalancerRuleByName(lb, lbRuleName)
if !exists {
return fmt.Errorf("A LoadBalancer Rule with name %q cannot be found.", lbRuleName)
}
return nil
}
}
func testCheckAzureRMLoadBalancerRuleNotExists(lbRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, _, exists := findLoadBalancerRuleByName(lb, lbRuleName)
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q has been found.", lbRuleName)
}
return nil
}
}
func testAccAzureRMLoadBalancerRule_basic(rInt int, lbRuleName string) string {
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestrg-%d"
location = "West US"
}
resource "azurerm_public_ip" "test" {
name = "test-ip-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
public_ip_address_allocation = "static"
}
resource "azurerm_lb" "test" {
name = "arm-test-loadbalancer-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
frontend_ip_configuration {
name = "one-%d"
public_ip_address_id = "${azurerm_public_ip.test.id}"
}
}
resource "azurerm_lb_rule" "test" {
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
loadbalancer_id = "${azurerm_lb.test.id}"
name = "%s"
protocol = "Tcp"
frontend_port = 3389
backend_port = 3389
frontend_ip_configuration_name = "one-%d"
}
`, rInt, rInt, rInt, rInt, lbRuleName, rInt)
}
func testAccAzureRMLoadBalancerRule_removal(rInt int) string {
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestrg-%d"
location = "West US"
}
resource "azurerm_public_ip" "test" {
name = "test-ip-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
public_ip_address_allocation = "static"
}
resource "azurerm_lb" "test" {
name = "arm-test-loadbalancer-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
frontend_ip_configuration {
name = "one-%d"
public_ip_address_id = "${azurerm_public_ip.test.id}"
}
}
`, rInt, rInt, rInt, rInt)
}
| builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.005570471752434969,
0.00043995509622618556,
0.00016439442697446793,
0.00017027067951858044,
0.0011476341169327497
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\tif !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z0-9]`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 346
} | package digitalocean
import (
"fmt"
"testing"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccDigitalOceanFloatingIP_Region(t *testing.T) {
var floatingIP godo.FloatingIP
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanFloatingIPDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanFloatingIPConfig_region,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanFloatingIPExists("digitalocean_floating_ip.foobar", &floatingIP),
resource.TestCheckResourceAttr(
"digitalocean_floating_ip.foobar", "region", "nyc3"),
),
},
},
})
}
func TestAccDigitalOceanFloatingIP_Droplet(t *testing.T) {
var floatingIP godo.FloatingIP
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckDigitalOceanFloatingIPDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckDigitalOceanFloatingIPConfig_droplet,
Check: resource.ComposeTestCheckFunc(
testAccCheckDigitalOceanFloatingIPExists("digitalocean_floating_ip.foobar", &floatingIP),
resource.TestCheckResourceAttr(
"digitalocean_floating_ip.foobar", "region", "nyc3"),
),
},
},
})
}
func testAccCheckDigitalOceanFloatingIPDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*godo.Client)
for _, rs := range s.RootModule().Resources {
if rs.Type != "digitalocean_floating_ip" {
continue
}
// Try to find the key
_, _, err := client.FloatingIPs.Get(rs.Primary.ID)
if err == nil {
return fmt.Errorf("Floating IP still exists")
}
}
return nil
}
func testAccCheckDigitalOceanFloatingIPExists(n string, floatingIP *godo.FloatingIP) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Record ID is set")
}
client := testAccProvider.Meta().(*godo.Client)
// Try to find the FloatingIP
foundFloatingIP, _, err := client.FloatingIPs.Get(rs.Primary.ID)
if err != nil {
return err
}
if foundFloatingIP.IP != rs.Primary.ID {
return fmt.Errorf("Record not found")
}
*floatingIP = *foundFloatingIP
return nil
}
}
var testAccCheckDigitalOceanFloatingIPConfig_region = `
resource "digitalocean_floating_ip" "foobar" {
region = "nyc3"
}`
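// The droplet variant below provisions an SSH key and a droplet first, then
// attaches the floating IP to that droplet in the droplet's own region.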
var testAccCheckDigitalOceanFloatingIPConfig_droplet = fmt.Sprintf(`
resource "digitalocean_ssh_key" "foobar" {
name = "foobar"
public_key = "%s"
}
resource "digitalocean_droplet" "foobar" {
name = "baz"
size = "1gb"
image = "centos-7-x64"
region = "nyc3"
ipv6 = true
private_networking = true
ssh_keys = ["${digitalocean_ssh_key.foobar.id}"]
}
resource "digitalocean_floating_ip" "foobar" {
droplet_id = "${digitalocean_droplet.foobar.id}"
region = "${digitalocean_droplet.foobar.region}"
}`, testAccValidPublicKey)
| builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00022584294492844492,
0.0001759003644110635,
0.0001639331312617287,
0.00016784740728326142,
0.000017904343621921726
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\tif !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z0-9]`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 346
} | package azurerm
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
"github.com/jen20/riviera/dns"
)
func TestAccAzureRMDnsZone_basic(t *testing.T) {
ri := acctest.RandInt()
config := fmt.Sprintf(testAccAzureRMDnsZone_basic, ri, ri)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMDnsZoneDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: config,
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMDnsZoneExists("azurerm_dns_zone.test"),
),
},
},
})
}
func testCheckAzureRMDnsZoneExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
// Ensure we have enough information in state to look up in API
rs, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
conn := testAccProvider.Meta().(*ArmClient).rivieraClient
readRequest := conn.NewRequestForURI(rs.Primary.ID)
readRequest.Command = &dns.GetDNSZone{}
readResponse, err := readRequest.Execute()
if err != nil {
return fmt.Errorf("Bad: GetDNSZone: %s", err)
}
if !readResponse.IsSuccessful() {
return fmt.Errorf("Bad: GetDNSZone: %s", readResponse.Error)
}
return nil
}
}
func testCheckAzureRMDnsZoneDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*ArmClient).rivieraClient
for _, rs := range s.RootModule().Resources {
if rs.Type != "azurerm_dns_zone" {
continue
}
readRequest := conn.NewRequestForURI(rs.Primary.ID)
readRequest.Command = &dns.GetDNSZone{}
readResponse, err := readRequest.Execute()
if err != nil {
return fmt.Errorf("Bad: GetDNSZone: %s", err)
}
if readResponse.IsSuccessful() {
return fmt.Errorf("Bad: DNS zone still exists: %s", readResponse.Error)
}
}
return nil
}
var testAccAzureRMDnsZone_basic = `
resource "azurerm_resource_group" "test" {
name = "acctestRG_%d"
location = "West US"
}
resource "azurerm_dns_zone" "test" {
name = "acctestzone%d.com"
resource_group_name = "${azurerm_resource_group.test.name}"
}
`
| builtin/providers/azurerm/resource_arm_dns_zone_test.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.0001762663305271417,
0.0001687824260443449,
0.00016471922572236508,
0.00016841072647366673,
0.000003181614601999172
] |
{
"id": 4,
"code_window": [
"\t}\n",
"\n",
"\tif !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {\n",
"\t\terrors = append(errors, fmt.Errorf(\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\tif !regexp.MustCompile(`^[a-zA-Z0-9]`).MatchString(value) {\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 346
} | package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSIAMSamlProvider_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckIAMSamlProviderDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccIAMSamlProviderConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"),
),
},
resource.TestStep{
Config: testAccIAMSamlProviderConfigUpdate,
Check: resource.ComposeTestCheckFunc(
testAccCheckIAMSamlProvider("aws_iam_saml_provider.salesforce"),
),
},
},
})
}
func testAccCheckIAMSamlProviderDestroy(s *terraform.State) error {
iamconn := testAccProvider.Meta().(*AWSClient).iamconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_iam_saml_provider" {
continue
}
input := &iam.GetSAMLProviderInput{
SAMLProviderArn: aws.String(rs.Primary.ID),
}
out, err := iamconn.GetSAMLProvider(input)
if err != nil {
if iamerr, ok := err.(awserr.Error); ok && iamerr.Code() == "NoSuchEntity" {
// none found, that's good
return nil
}
return fmt.Errorf("Error reading IAM SAML Provider, out: %s, err: %s", out, err)
}
if out != nil {
return fmt.Errorf("Found IAM SAML Provider, expected none: %s", out)
}
}
return nil
}
func testAccCheckIAMSamlProvider(id string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[id]
if !ok {
return fmt.Errorf("Not Found: %s", id)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No ID is set")
}
iamconn := testAccProvider.Meta().(*AWSClient).iamconn
_, err := iamconn.GetSAMLProvider(&iam.GetSAMLProviderInput{
SAMLProviderArn: aws.String(rs.Primary.ID),
})
if err != nil {
return err
}
return nil
}
}
const testAccIAMSamlProviderConfig = `
resource "aws_iam_saml_provider" "salesforce" {
name = "tf-salesforce-test"
saml_metadata_document = "${file("./test-fixtures/saml-metadata.xml")}"
}
`
const testAccIAMSamlProviderConfigUpdate = `
resource "aws_iam_saml_provider" "salesforce" {
name = "tf-salesforce-test"
saml_metadata_document = "${file("./test-fixtures/saml-metadata-modified.xml")}"
}
`
| builtin/providers/aws/resource_aws_iam_saml_provider_test.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017626615590415895,
0.00016816024435684085,
0.00016598700312897563,
0.00016767655324656516,
0.0000027154442250321154
] |
{
"id": 5,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must start with a word character: %q\", k, value))\n",
"\t}\n",
"\n",
"\treturn\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must start with a word character or number: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 348
} | package azurerm
import (
"fmt"
"log"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/riviera/azure"
)
func resourceArmLoadBalancerRule() *schema.Resource {
return &schema.Resource{
Create: resourceArmLoadBalancerRuleCreate,
Read: resourceArmLoadBalancerRuleRead,
Update: resourceArmLoadBalancerRuleCreate,
Delete: resourceArmLoadBalancerRuleDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArmLoadBalancerRuleName,
},
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: azureRMNormalizeLocation,
},
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"loadbalancer_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"frontend_ip_configuration_name": {
Type: schema.TypeString,
Required: true,
},
"frontend_ip_configuration_id": {
Type: schema.TypeString,
Computed: true,
},
"backend_address_pool_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Required: true,
},
"frontend_port": {
Type: schema.TypeInt,
Required: true,
},
"backend_port": {
Type: schema.TypeInt,
Required: true,
},
"probe_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"enable_floating_ip": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout_in_minutes": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"load_distribution": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
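// resourceArmLoadBalancerRuleCreate appends the new rule to the parent load
// balancer's rule list and writes the whole load balancer back via CreateOrUpdate.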
func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
_, _, exists = findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
}
newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer)
if err != nil {
return errwrap.Wrapf("Error Exanding LoadBalancer Rule {{err}}", err)
}
lbRules := append(*loadBalancer.Properties.LoadBalancingRules, *newLbRule)
loadBalancer.Properties.LoadBalancingRules = &lbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
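	// CreateOrUpdate returns the full load balancer, so find the rule we just
	// added by name to recover its generated ID.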
var rule_id string
for _, LoadBalancingRule := range *(*read.Properties).LoadBalancingRules {
if *LoadBalancingRule.Name == d.Get("name").(string) {
rule_id = *LoadBalancingRule.ID
}
}
if rule_id != "" {
d.SetId(rule_id)
} else {
return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id)
}
log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: []string{"Succeeded"},
Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err)
}
return resourceArmLoadBalancerRuleRead(d, meta)
}
func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error {
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
configs := *loadBalancer.Properties.LoadBalancingRules
for _, config := range configs {
if *config.Name == d.Get("name").(string) {
d.Set("name", config.Name)
d.Set("protocol", config.Properties.Protocol)
d.Set("frontend_port", config.Properties.FrontendPort)
d.Set("backend_port", config.Properties.BackendPort)
if config.Properties.EnableFloatingIP != nil {
d.Set("enable_floating_ip", config.Properties.EnableFloatingIP)
}
if config.Properties.IdleTimeoutInMinutes != nil {
d.Set("idle_timeout_in_minutes", config.Properties.IdleTimeoutInMinutes)
}
if config.Properties.FrontendIPConfiguration != nil {
d.Set("frontend_ip_configuration_id", config.Properties.FrontendIPConfiguration.ID)
}
if config.Properties.BackendAddressPool != nil {
d.Set("backend_address_pool_id", config.Properties.BackendAddressPool.ID)
}
if config.Properties.Probe != nil {
d.Set("probe_id", config.Properties.Probe.ID)
}
if config.Properties.LoadDistribution != "" {
d.Set("load_distribution", config.Properties.LoadDistribution)
}
}
}
return nil
}
func resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
return nil
}
_, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if !exists {
return nil
}
oldLbRules := *loadBalancer.Properties.LoadBalancingRules
newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...)
loadBalancer.Properties.LoadBalancingRules = &newLbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
return nil
}
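// expandAzureRmLoadBalancerRule maps the schema fields onto the SDK's
// LoadBalancingRule, resolving the frontend IP configuration by name on the
// parent load balancer.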
func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) {
properties := network.LoadBalancingRulePropertiesFormat{
Protocol: network.TransportProtocol(d.Get("protocol").(string)),
FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))),
BackendPort: azure.Int32(int32(d.Get("backend_port").(int))),
EnableFloatingIP: azure.Bool(d.Get("enable_floating_ip").(bool)),
}
if v, ok := d.GetOk("idle_timeout_in_minutes"); ok {
properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int)))
}
if v := d.Get("load_distribution").(string); v != "" {
properties.LoadDistribution = network.LoadDistribution(v)
}
if v := d.Get("frontend_ip_configuration_name").(string); v != "" {
rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v)
if !exists {
return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v)
}
feip := network.SubResource{
ID: rule.ID,
}
properties.FrontendIPConfiguration = &feip
}
if v := d.Get("backend_address_pool_id").(string); v != "" {
beAP := network.SubResource{
ID: &v,
}
properties.BackendAddressPool = &beAP
}
if v := d.Get("probe_id").(string); v != "" {
pid := network.SubResource{
ID: &v,
}
properties.Probe = &pid
}
lbRule := network.LoadBalancingRule{
Name: azure.String(d.Get("name").(string)),
Properties: &properties,
}
return &lbRule, nil
}
func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only word characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 80 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 80 characters: %q", k, value))
}
if len(value) == 0 {
errors = append(errors, fmt.Errorf(
"%q cannot be an empty string: %q", k, value))
}
if !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must end with a word character: %q", k, value))
}
if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must start with a word character: %q", k, value))
}
return
}
| builtin/providers/azurerm/resource_arm_loadbalancer_rule.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.997553288936615,
0.08111932873725891,
0.00016467478417325765,
0.00017001459491439164,
0.26855504512786865
] |
{
"id": 5,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must start with a word character: %q\", k, value))\n",
"\t}\n",
"\n",
"\treturn\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must start with a word character or number: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 348
} | /*
Copyright (c) 2015 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
type Folder struct {
Common
InventoryPath string
}
func NewFolder(c *vim25.Client, ref types.ManagedObjectReference) *Folder {
return &Folder{
Common: NewCommon(c, ref),
}
}
func NewRootFolder(c *vim25.Client) *Folder {
f := NewFolder(c, c.ServiceContent.RootFolder)
f.InventoryPath = "/"
return f
}
func (f Folder) Children(ctx context.Context) ([]Reference, error) {
var mf mo.Folder
err := f.Properties(ctx, f.Reference(), []string{"childEntity"}, &mf)
if err != nil {
return nil, err
}
var rs []Reference
for _, e := range mf.ChildEntity {
if r := NewReference(f.c, e); r != nil {
rs = append(rs, r)
}
}
return rs, nil
}
func (f Folder) CreateDatacenter(ctx context.Context, datacenter string) (*Datacenter, error) {
req := types.CreateDatacenter{
This: f.Reference(),
Name: datacenter,
}
res, err := methods.CreateDatacenter(ctx, f.c, &req)
if err != nil {
return nil, err
}
// Response will be nil if this is an ESX host that does not belong to a vCenter
if res == nil {
return nil, nil
}
return NewDatacenter(f.c, res.Returnval), nil
}
func (f Folder) CreateCluster(ctx context.Context, cluster string, spec types.ClusterConfigSpecEx) (*ClusterComputeResource, error) {
req := types.CreateClusterEx{
This: f.Reference(),
Name: cluster,
Spec: spec,
}
res, err := methods.CreateClusterEx(ctx, f.c, &req)
if err != nil {
return nil, err
}
// Response will be nil if this is an ESX host that does not belong to a vCenter
if res == nil {
return nil, nil
}
return NewClusterComputeResource(f.c, res.Returnval), nil
}
func (f Folder) CreateFolder(ctx context.Context, name string) (*Folder, error) {
req := types.CreateFolder{
This: f.Reference(),
Name: name,
}
res, err := methods.CreateFolder(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewFolder(f.c, res.Returnval), err
}
func (f Folder) AddStandaloneHost(ctx context.Context, spec types.HostConnectSpec, addConnected bool, license *string, compResSpec *types.BaseComputeResourceConfigSpec) (*Task, error) {
req := types.AddStandaloneHost_Task{
This: f.Reference(),
Spec: spec,
AddConnected: addConnected,
}
if license != nil {
req.License = *license
}
if compResSpec != nil {
req.CompResSpec = *compResSpec
}
res, err := methods.AddStandaloneHost_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}
func (f Folder) CreateVM(ctx context.Context, config types.VirtualMachineConfigSpec, pool *ResourcePool, host *HostSystem) (*Task, error) {
req := types.CreateVM_Task{
This: f.Reference(),
Config: config,
Pool: pool.Reference(),
}
if host != nil {
ref := host.Reference()
req.Host = &ref
}
res, err := methods.CreateVM_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}
func (f Folder) RegisterVM(ctx context.Context, path string, name string, asTemplate bool, pool *ResourcePool, host *HostSystem) (*Task, error) {
req := types.RegisterVM_Task{
This: f.Reference(),
Path: path,
AsTemplate: asTemplate,
}
if name != "" {
req.Name = name
}
if host != nil {
ref := host.Reference()
req.Host = &ref
}
if pool != nil {
ref := pool.Reference()
req.Pool = &ref
}
res, err := methods.RegisterVM_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}
func (f Folder) CreateDVS(ctx context.Context, spec types.DVSCreateSpec) (*Task, error) {
req := types.CreateDVS_Task{
This: f.Reference(),
Spec: spec,
}
res, err := methods.CreateDVS_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}
func (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReference) (*Task, error) {
req := types.MoveIntoFolder_Task{
This: f.Reference(),
List: list,
}
res, err := methods.MoveIntoFolder_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}
| vendor/github.com/vmware/govmomi/object/folder.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017925720021594316,
0.00017142145952675492,
0.0001637931272853166,
0.0001710986834950745,
0.000003854581791529199
] |
{
"id": 5,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must start with a word character: %q\", k, value))\n",
"\t}\n",
"\n",
"\treturn\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must start with a word character or number: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 348
} | package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/redshift"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSRedshiftSecurityGroup_ingressCidr(t *testing.T) {
var v redshift.ClusterSecurityGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "name", "redshift-sg-terraform"),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "description", "Managed by Terraform"),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.2735652665.cidr", "10.0.0.1/24"),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "1"),
),
},
},
})
}
func TestAccAWSRedshiftSecurityGroup_updateIngressCidr(t *testing.T) {
var v redshift.ClusterSecurityGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "1"),
),
},
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidrAdd,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "3"),
),
},
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidrReduce,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "2"),
),
},
},
})
}
func TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup(t *testing.T) {
var v redshift.ClusterSecurityGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgId,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "name", "redshift-sg-terraform"),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "description", "this is a description"),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "1"),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.2230908922.security_group_name", "terraform_redshift_acceptance_test"),
),
},
},
})
}
func TestAccAWSRedshiftSecurityGroup_updateIngressSecurityGroup(t *testing.T) {
var v redshift.ClusterSecurityGroup
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgId,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "1"),
),
},
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgIdAdd,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "3"),
),
},
resource.TestStep{
Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgIdReduce,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v),
resource.TestCheckResourceAttr(
"aws_redshift_security_group.bar", "ingress.#", "2"),
),
},
},
})
}
func testAccCheckAWSRedshiftSecurityGroupExists(n string, v *redshift.ClusterSecurityGroup) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No Redshift Security Group ID is set")
}
conn := testAccProvider.Meta().(*AWSClient).redshiftconn
opts := redshift.DescribeClusterSecurityGroupsInput{
ClusterSecurityGroupName: aws.String(rs.Primary.ID),
}
resp, err := conn.DescribeClusterSecurityGroups(&opts)
if err != nil {
return err
}
if len(resp.ClusterSecurityGroups) != 1 ||
*resp.ClusterSecurityGroups[0].ClusterSecurityGroupName != rs.Primary.ID {
return fmt.Errorf("Redshift Security Group not found")
}
*v = *resp.ClusterSecurityGroups[0]
return nil
}
}
func testAccCheckAWSRedshiftSecurityGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).redshiftconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_redshift_security_group" {
continue
}
// Try to find the Group
resp, err := conn.DescribeClusterSecurityGroups(
&redshift.DescribeClusterSecurityGroupsInput{
ClusterSecurityGroupName: aws.String(rs.Primary.ID),
})
if err == nil {
if len(resp.ClusterSecurityGroups) != 0 &&
*resp.ClusterSecurityGroups[0].ClusterSecurityGroupName == rs.Primary.ID {
return fmt.Errorf("Redshift Security Group still exists")
}
}
// Verify the error
newerr, ok := err.(awserr.Error)
if !ok {
return err
}
if newerr.Code() != "ClusterSecurityGroupNotFound" {
return err
}
}
return nil
}
func TestResourceAWSRedshiftSecurityGroupNameValidation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
}{
{
Value: "default",
ErrCount: 1,
},
{
Value: "testing123%%",
ErrCount: 1,
},
{
Value: "TestingSG",
ErrCount: 1,
},
{
Value: randomString(256),
ErrCount: 1,
},
}
for _, tc := range cases {
_, errors := validateRedshiftSecurityGroupName(tc.Value, "aws_redshift_security_group_name")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected the Redshift Security Group Name to trigger a validation error")
}
}
}
const testAccAWSRedshiftSecurityGroupConfig_ingressCidr = `
provider "aws" {
region = "us-east-1"
}
resource "aws_redshift_security_group" "bar" {
name = "redshift-sg-terraform"
ingress {
cidr = "10.0.0.1/24"
}
}`
const testAccAWSRedshiftSecurityGroupConfig_ingressCidrAdd = `
provider "aws" {
region = "us-east-1"
}
resource "aws_redshift_security_group" "bar" {
name = "redshift-sg-terraform"
description = "this is a description"
ingress {
cidr = "10.0.0.1/24"
}
ingress {
cidr = "10.0.10.1/24"
}
ingress {
cidr = "10.0.20.1/24"
}
}`
const testAccAWSRedshiftSecurityGroupConfig_ingressCidrReduce = `
provider "aws" {
region = "us-east-1"
}
resource "aws_redshift_security_group" "bar" {
name = "redshift-sg-terraform"
description = "this is a description"
ingress {
cidr = "10.0.0.1/24"
}
ingress {
cidr = "10.0.10.1/24"
}
}`
const testAccAWSRedshiftSecurityGroupConfig_ingressSgId = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "redshift" {
name = "terraform_redshift_acceptance_test"
description = "Used in the redshift acceptance tests"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["10.0.0.0/8"]
}
}
resource "aws_redshift_security_group" "bar" {
name = "redshift-sg-terraform"
description = "this is a description"
ingress {
security_group_name = "${aws_security_group.redshift.name}"
security_group_owner_id = "${aws_security_group.redshift.owner_id}"
}
}`
const testAccAWSRedshiftSecurityGroupConfig_ingressSgIdAdd = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "redshift" {
name = "terraform_redshift_acceptance_test"
description = "Used in the redshift acceptance tests"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["10.0.0.0/16"]
}
}
resource "aws_security_group" "redshift2" {
name = "terraform_redshift_acceptance_test_2"
description = "Used in the redshift acceptance tests #2"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["10.1.0.0/16"]
}
}
resource "aws_security_group" "redshift3" {
name = "terraform_redshift_acceptance_test_3"
description = "Used in the redshift acceptance tests #3"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["10.2.0.0/16"]
}
}
resource "aws_redshift_security_group" "bar" {
name = "redshift-sg-terraform"
description = "this is a description"
ingress {
security_group_name = "${aws_security_group.redshift.name}"
security_group_owner_id = "${aws_security_group.redshift.owner_id}"
}
ingress {
security_group_name = "${aws_security_group.redshift2.name}"
security_group_owner_id = "${aws_security_group.redshift.owner_id}"
}
ingress {
security_group_name = "${aws_security_group.redshift3.name}"
security_group_owner_id = "${aws_security_group.redshift.owner_id}"
}
}`
const testAccAWSRedshiftSecurityGroupConfig_ingressSgIdReduce = `
provider "aws" {
region = "us-east-1"
}
resource "aws_security_group" "redshift" {
name = "terraform_redshift_acceptance_test"
description = "Used in the redshift acceptance tests"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["10.0.0.0/16"]
}
}
resource "aws_security_group" "redshift2" {
name = "terraform_redshift_acceptance_test_2"
description = "Used in the redshift acceptance tests #2"
ingress {
protocol = "tcp"
from_port = 22
to_port = 22
cidr_blocks = ["10.1.0.0/16"]
}
}
resource "aws_redshift_security_group" "bar" {
name = "redshift-sg-terraform"
description = "this is a description"
ingress {
security_group_name = "${aws_security_group.redshift.name}"
security_group_owner_id = "${aws_security_group.redshift.owner_id}"
}
ingress {
security_group_name = "${aws_security_group.redshift2.name}"
security_group_owner_id = "${aws_security_group.redshift.owner_id}"
}
}`
| builtin/providers/aws/resource_aws_redshift_security_group_test.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.003774258540943265,
0.0002933426876552403,
0.00016472127754241228,
0.00016839435556903481,
0.0005499179824255407
] |
{
"id": 5,
"code_window": [
"\t\terrors = append(errors, fmt.Errorf(\n",
"\t\t\t\"%q must start with a word character: %q\", k, value))\n",
"\t}\n",
"\n",
"\treturn\n",
"}"
],
"labels": [
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\"%q must start with a word character or number: %q\", k, value))\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule.go",
"type": "replace",
"edit_start_line_idx": 348
} | ---
layout: "inner"
noindex: true
page_title: "404"
---
<h1>Page not found</h1>
<p>
Unfortunately, the page you requested can't be found.
</p>
| website/source/404.html.erb | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00017997993563767523,
0.0001760736631695181,
0.00017216740525327623,
0.0001760736631695181,
0.000003906265192199498
] |
{
"id": 6,
"code_window": [
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"test123test\",\n",
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tValue: \"test#test\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "replace",
"edit_start_line_idx": 27
} | package azurerm
import (
"fmt"
"os"
"testing"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestResourceAzureRMLoadBalancerRuleNameLabel_validation(t *testing.T) {
cases := []struct {
Value string
ErrCount int
}{
{
Value: "-word",
ErrCount: 1,
},
{
Value: "testing-",
ErrCount: 1,
},
{
Value: "test123test",
ErrCount: 1,
},
{
Value: acctest.RandStringFromCharSet(81, "abcdedfed"),
ErrCount: 1,
},
{
Value: "test.rule",
ErrCount: 0,
},
{
Value: "test_rule",
ErrCount: 0,
},
{
Value: "test-rule",
ErrCount: 0,
},
{
Value: "TestRule",
ErrCount: 0,
},
}
for _, tc := range cases {
_, errors := validateArmLoadBalancerRuleName(tc.Value, "azurerm_lb_rule")
if len(errors) != tc.ErrCount {
t.Fatalf("Expected the Azure RM LoadBalancer Rule Name Label to trigger a validation error")
}
}
}
func TestAccAzureRMLoadBalancerRule_basic(t *testing.T) {
var lb network.LoadBalancer
ri := acctest.RandInt()
lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha))
subscriptionID := os.Getenv("ARM_SUBSCRIPTION_ID")
lbRule_id := fmt.Sprintf(
"/subscriptions/%s/resourceGroups/acctestrg-%d/providers/Microsoft.Network/loadBalancers/arm-test-loadbalancer-%d/loadBalancingRules/%s",
subscriptionID, ri, ri, lbRuleName)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
resource.TestCheckResourceAttr(
"azurerm_lb_rule.test", "id", lbRule_id),
),
},
},
})
}
func TestAccAzureRMLoadBalancerRule_removal(t *testing.T) {
var lb network.LoadBalancer
ri := acctest.RandInt()
lbRuleName := fmt.Sprintf("LbRule-%s", acctest.RandStringFromCharSet(8, acctest.CharSetAlpha))
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testCheckAzureRMLoadBalancerDestroy,
Steps: []resource.TestStep{
{
Config: testAccAzureRMLoadBalancerRule_basic(ri, lbRuleName),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
testCheckAzureRMLoadBalancerRuleExists(lbRuleName, &lb),
),
},
{
Config: testAccAzureRMLoadBalancerRule_removal(ri),
Check: resource.ComposeTestCheckFunc(
testCheckAzureRMLoadBalancerExists("azurerm_lb.test", &lb),
testCheckAzureRMLoadBalancerRuleNotExists(lbRuleName, &lb),
),
},
},
})
}
func testCheckAzureRMLoadBalancerRuleExists(lbRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, _, exists := findLoadBalancerRuleByName(lb, lbRuleName)
if !exists {
return fmt.Errorf("A LoadBalancer Rule with name %q cannot be found.", lbRuleName)
}
return nil
}
}
func testCheckAzureRMLoadBalancerRuleNotExists(lbRuleName string, lb *network.LoadBalancer) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, _, exists := findLoadBalancerRuleByName(lb, lbRuleName)
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q has been found.", lbRuleName)
}
return nil
}
}
func testAccAzureRMLoadBalancerRule_basic(rInt int, lbRuleName string) string {
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestrg-%d"
location = "West US"
}
resource "azurerm_public_ip" "test" {
name = "test-ip-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
public_ip_address_allocation = "static"
}
resource "azurerm_lb" "test" {
name = "arm-test-loadbalancer-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
frontend_ip_configuration {
name = "one-%d"
public_ip_address_id = "${azurerm_public_ip.test.id}"
}
}
resource "azurerm_lb_rule" "test" {
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
loadbalancer_id = "${azurerm_lb.test.id}"
name = "%s"
protocol = "Tcp"
frontend_port = 3389
backend_port = 3389
frontend_ip_configuration_name = "one-%d"
}
`, rInt, rInt, rInt, rInt, lbRuleName, rInt)
}
func testAccAzureRMLoadBalancerRule_removal(rInt int) string {
return fmt.Sprintf(`
resource "azurerm_resource_group" "test" {
name = "acctestrg-%d"
location = "West US"
}
resource "azurerm_public_ip" "test" {
name = "test-ip-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
public_ip_address_allocation = "static"
}
resource "azurerm_lb" "test" {
name = "arm-test-loadbalancer-%d"
location = "West US"
resource_group_name = "${azurerm_resource_group.test.name}"
frontend_ip_configuration {
name = "one-%d"
public_ip_address_id = "${azurerm_public_ip.test.id}"
}
}
`, rInt, rInt, rInt, rInt)
}
| builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.9606124758720398,
0.0459614135324955,
0.00016449634858872741,
0.00017030606977641582,
0.2045222669839859
] |
{
"id": 6,
"code_window": [
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"test123test\",\n",
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tValue: \"test#test\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "replace",
"edit_start_line_idx": 27
} | package terraform
import (
"reflect"
"sort"
"strings"
"sync"
"testing"
)
func TestContext2Refresh(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-basic")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "foo",
},
},
},
},
},
},
})
p.RefreshFn = nil
p.RefreshReturn = &InstanceState{
ID: "foo",
}
s, err := ctx.Refresh()
mod := s.RootModule()
if err != nil {
t.Fatalf("err: %s", err)
}
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
if p.RefreshState.ID != "foo" {
t.Fatalf("bad: %#v", p.RefreshState)
}
if !reflect.DeepEqual(mod.Resources["aws_instance.web"].Primary, p.RefreshReturn) {
t.Fatalf("bad: %#v %#v", mod.Resources["aws_instance.web"], p.RefreshReturn)
}
for _, r := range mod.Resources {
if r.Type == "" {
t.Fatalf("no type: %#v", r)
}
}
}
func TestContext2Refresh_targeted(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-targeted")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_vpc.metoo": resourceState("aws_vpc", "vpc-abc123"),
"aws_instance.notme": resourceState("aws_instance", "i-bcd345"),
"aws_instance.me": resourceState("aws_instance", "i-abc123"),
"aws_elb.meneither": resourceState("aws_elb", "lb-abc123"),
},
},
},
},
Targets: []string{"aws_instance.me"},
})
refreshedResources := make([]string, 0, 2)
p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) {
refreshedResources = append(refreshedResources, i.Id)
return is, nil
}
_, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
expected := []string{"aws_vpc.metoo", "aws_instance.me"}
if !reflect.DeepEqual(refreshedResources, expected) {
t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources)
}
}
func TestContext2Refresh_targetedCount(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-targeted-count")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_vpc.metoo": resourceState("aws_vpc", "vpc-abc123"),
"aws_instance.notme": resourceState("aws_instance", "i-bcd345"),
"aws_instance.me.0": resourceState("aws_instance", "i-abc123"),
"aws_instance.me.1": resourceState("aws_instance", "i-cde567"),
"aws_instance.me.2": resourceState("aws_instance", "i-cde789"),
"aws_elb.meneither": resourceState("aws_elb", "lb-abc123"),
},
},
},
},
Targets: []string{"aws_instance.me"},
})
refreshedResources := make([]string, 0, 2)
p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) {
refreshedResources = append(refreshedResources, i.Id)
return is, nil
}
_, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
// Target didn't specify index, so we should get all our instances
expected := []string{
"aws_vpc.metoo",
"aws_instance.me.0",
"aws_instance.me.1",
"aws_instance.me.2",
}
sort.Strings(expected)
sort.Strings(refreshedResources)
if !reflect.DeepEqual(refreshedResources, expected) {
t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources)
}
}
func TestContext2Refresh_targetedCountIndex(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-targeted-count")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_vpc.metoo": resourceState("aws_vpc", "vpc-abc123"),
"aws_instance.notme": resourceState("aws_instance", "i-bcd345"),
"aws_instance.me.0": resourceState("aws_instance", "i-abc123"),
"aws_instance.me.1": resourceState("aws_instance", "i-cde567"),
"aws_instance.me.2": resourceState("aws_instance", "i-cde789"),
"aws_elb.meneither": resourceState("aws_elb", "lb-abc123"),
},
},
},
},
Targets: []string{"aws_instance.me[0]"},
})
refreshedResources := make([]string, 0, 2)
p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) {
refreshedResources = append(refreshedResources, i.Id)
return is, nil
}
_, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
expected := []string{"aws_vpc.metoo", "aws_instance.me.0"}
if !reflect.DeepEqual(refreshedResources, expected) {
t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources)
}
}
func TestContext2Refresh_moduleComputedVar(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-module-computed-var")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
// This was failing (see GH-2188) at some point, so this test just
// verifies that the failure goes away.
if _, err := ctx.Refresh(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestContext2Refresh_delete(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-basic")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "foo",
},
},
},
},
},
},
})
p.RefreshFn = nil
p.RefreshReturn = nil
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
mod := s.RootModule()
if len(mod.Resources) > 0 {
t.Fatal("resources should be empty")
}
}
func TestContext2Refresh_ignoreUncreated(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-basic")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: nil,
})
p.RefreshFn = nil
p.RefreshReturn = &InstanceState{
ID: "foo",
}
_, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
if p.RefreshCalled {
t.Fatal("refresh should not be called")
}
}
func TestContext2Refresh_hook(t *testing.T) {
h := new(MockHook)
p := testProvider("aws")
m := testModule(t, "refresh-basic")
ctx := testContext2(t, &ContextOpts{
Module: m,
Hooks: []Hook{h},
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "foo",
},
},
},
},
},
},
})
if _, err := ctx.Refresh(); err != nil {
t.Fatalf("err: %s", err)
}
if !h.PreRefreshCalled {
t.Fatal("should be called")
}
if !h.PostRefreshCalled {
t.Fatal("should be called")
}
}
func TestContext2Refresh_modules(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-modules")
state := &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "bar",
Tainted: true,
},
},
},
},
&ModuleState{
Path: []string{"root", "child"},
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "baz",
},
},
},
},
},
}
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: state,
})
p.RefreshFn = func(info *InstanceInfo, s *InstanceState) (*InstanceState, error) {
if s.ID != "baz" {
return s, nil
}
s.ID = "new"
return s, nil
}
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(s.String())
expected := strings.TrimSpace(testContextRefreshModuleStr)
if actual != expected {
t.Fatalf("bad:\n\n%s\n\n%s", actual, expected)
}
}
func TestContext2Refresh_moduleInputComputedOutput(t *testing.T) {
m := testModule(t, "refresh-module-input-computed-output")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
if _, err := ctx.Refresh(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestContext2Refresh_moduleVarModule(t *testing.T) {
m := testModule(t, "refresh-module-var-module")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
if _, err := ctx.Refresh(); err != nil {
t.Fatalf("err: %s", err)
}
}
// GH-70
func TestContext2Refresh_noState(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-no-state")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
p.RefreshFn = nil
p.RefreshReturn = &InstanceState{
ID: "foo",
}
if _, err := ctx.Refresh(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestContext2Refresh_output(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-output")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "foo",
Attributes: map[string]string{
"foo": "bar",
},
},
},
},
Outputs: map[string]*OutputState{
"foo": &OutputState{
Type: "string",
Sensitive: false,
Value: "foo",
},
},
},
},
},
})
p.RefreshFn = func(info *InstanceInfo, s *InstanceState) (*InstanceState, error) {
return s, nil
}
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(s.String())
expected := strings.TrimSpace(testContextRefreshOutputStr)
if actual != expected {
t.Fatalf("bad:\n\n%s\n\n%s", actual, expected)
}
}
func TestContext2Refresh_outputPartial(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-output-partial")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "foo",
},
},
},
Outputs: map[string]*OutputState{},
},
},
},
})
p.RefreshFn = nil
p.RefreshReturn = nil
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(s.String())
expected := strings.TrimSpace(testContextRefreshOutputPartialStr)
if actual != expected {
t.Fatalf("bad:\n\n%s\n\n%s", actual, expected)
}
}
func TestContext2Refresh_state(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-basic")
state := &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Primary: &InstanceState{
ID: "bar",
},
},
},
},
},
}
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: state,
})
p.RefreshFn = nil
p.RefreshReturn = &InstanceState{
ID: "foo",
}
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
originalMod := state.RootModule()
mod := s.RootModule()
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
if !reflect.DeepEqual(p.RefreshState, originalMod.Resources["aws_instance.web"].Primary) {
t.Fatalf(
"bad:\n\n%#v\n\n%#v",
p.RefreshState,
originalMod.Resources["aws_instance.web"].Primary)
}
if !reflect.DeepEqual(mod.Resources["aws_instance.web"].Primary, p.RefreshReturn) {
t.Fatalf("bad: %#v", mod.Resources)
}
}
func TestContext2Refresh_dataState(t *testing.T) {
p := testProvider("null")
m := testModule(t, "refresh-data-resource-basic")
state := &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
// Intentionally no resources since data resources are
// supposed to refresh themselves even if they didn't
// already exist.
Resources: map[string]*ResourceState{},
},
},
}
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"null": testProviderFuncFixed(p),
},
State: state,
})
p.ReadDataDiffFn = nil
p.ReadDataDiffReturn = &InstanceDiff{
Attributes: map[string]*ResourceAttrDiff{
"inputs.#": {
Old: "0",
New: "1",
Type: DiffAttrInput,
},
"inputs.test": {
Old: "",
New: "yes",
Type: DiffAttrInput,
},
"outputs.#": {
Old: "",
New: "",
NewComputed: true,
Type: DiffAttrOutput,
},
},
}
p.ReadDataApplyFn = nil
p.ReadDataApplyReturn = &InstanceState{
ID: "-",
}
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
if !p.ReadDataDiffCalled {
t.Fatal("ReadDataDiff should have been called")
}
if !p.ReadDataApplyCalled {
t.Fatal("ReadDataApply should have been called")
}
mod := s.RootModule()
if got := mod.Resources["data.null_data_source.testing"].Primary.ID; got != "-" {
t.Fatalf("resource id is %q; want %s", got, "-")
}
if !reflect.DeepEqual(mod.Resources["data.null_data_source.testing"].Primary, p.ReadDataApplyReturn) {
t.Fatalf("bad: %#v", mod.Resources)
}
}
func TestContext2Refresh_tainted(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-basic")
state := &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "bar",
Tainted: true,
},
},
},
},
},
}
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: state,
})
p.RefreshFn = nil
p.RefreshReturn = &InstanceState{
ID: "foo",
Tainted: true,
}
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
actual := strings.TrimSpace(s.String())
expected := strings.TrimSpace(testContextRefreshTaintedStr)
if actual != expected {
t.Fatalf("bad:\n\n%s\n\n%s", actual, expected)
}
}
// Doing a Refresh (or any operation really, but Refresh usually
// happens first) with a config with an unknown provider should result in
// an error. The key bug this found was that this wasn't happening if
// Providers was _empty_.
func TestContext2Refresh_unknownProvider(t *testing.T) {
m := testModule(t, "refresh-unknown-provider")
p := testProvider("aws")
p.ApplyFn = testApplyFn
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{},
})
if _, err := ctx.Refresh(); err == nil {
t.Fatal("should error")
}
}
func TestContext2Refresh_vars(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-vars")
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.web": &ResourceState{
Type: "aws_instance",
Primary: &InstanceState{
ID: "foo",
},
},
},
},
},
},
})
p.RefreshFn = nil
p.RefreshReturn = &InstanceState{
ID: "foo",
}
s, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
mod := s.RootModule()
if !p.RefreshCalled {
t.Fatal("refresh should be called")
}
if p.RefreshState.ID != "foo" {
t.Fatalf("bad: %#v", p.RefreshState)
}
if !reflect.DeepEqual(mod.Resources["aws_instance.web"].Primary, p.RefreshReturn) {
t.Fatalf("bad: %#v", mod.Resources["aws_instance.web"])
}
for _, r := range mod.Resources {
if r.Type == "" {
t.Fatalf("no type: %#v", r)
}
}
}
func TestContext2Refresh_orphanModule(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "refresh-module-orphan")
	// Create a custom refresh function to track the order in which resources are visited
var order []string
var orderLock sync.Mutex
p.RefreshFn = func(
info *InstanceInfo,
is *InstanceState) (*InstanceState, error) {
orderLock.Lock()
defer orderLock.Unlock()
order = append(order, is.ID)
return is, nil
}
state := &State{
Modules: []*ModuleState{
&ModuleState{
Path: rootModulePath,
Resources: map[string]*ResourceState{
"aws_instance.foo": &ResourceState{
Primary: &InstanceState{
ID: "i-abc123",
Attributes: map[string]string{
"childid": "i-bcd234",
"grandchildid": "i-cde345",
},
},
Dependencies: []string{
"module.child",
"module.child",
},
},
},
},
&ModuleState{
Path: append(rootModulePath, "child"),
Resources: map[string]*ResourceState{
"aws_instance.bar": &ResourceState{
Primary: &InstanceState{
ID: "i-bcd234",
Attributes: map[string]string{
"grandchildid": "i-cde345",
},
},
Dependencies: []string{
"module.grandchild",
},
},
},
Outputs: map[string]*OutputState{
"id": &OutputState{
Value: "i-bcd234",
Type: "string",
},
"grandchild_id": &OutputState{
Value: "i-cde345",
Type: "string",
},
},
},
&ModuleState{
Path: append(rootModulePath, "child", "grandchild"),
Resources: map[string]*ResourceState{
"aws_instance.baz": &ResourceState{
Primary: &InstanceState{
ID: "i-cde345",
},
},
},
Outputs: map[string]*OutputState{
"id": &OutputState{
Value: "i-cde345",
Type: "string",
},
},
},
},
}
ctx := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
State: state,
})
testCheckDeadlock(t, func() {
_, err := ctx.Refresh()
if err != nil {
t.Fatalf("err: %s", err)
}
// TODO: handle order properly for orphaned modules / resources
// expected := []string{"i-abc123", "i-bcd234", "i-cde345"}
// if !reflect.DeepEqual(order, expected) {
// t.Fatalf("expected: %#v, got: %#v", expected, order)
// }
})
}
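// testCheckDeadlock is another shared test helper; the reconstruction below
// is an assumption about its shape (the timeout value is made up, and "time"
// must be imported), shown because it explains the wrapper above: the
// callback runs in a goroutine and the test fails if it never returns, which
// catches refreshes that deadlock.
func testCheckDeadlockSketch(t *testing.T, f func()) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		f()
	}()
	select {
	case <-done:
		// the wrapped operation completed normally
	case <-time.After(10 * time.Second):
		t.Fatal("deadlock: operation did not return")
	}
}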
func TestContext2Validate(t *testing.T) {
p := testProvider("aws")
m := testModule(t, "validate-good")
c := testContext2(t, &ContextOpts{
Module: m,
Providers: map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
})
w, e := c.Validate()
if len(w) > 0 {
t.Fatalf("bad: %#v", w)
}
if len(e) > 0 {
t.Fatalf("bad: %s", e)
}
}
| terraform/context_refresh_test.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.0001894193555926904,
0.0001695701212156564,
0.00016250350745394826,
0.0001694870152277872,
0.00000372087902178464
] |
{
"id": 6,
"code_window": [
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"test123test\",\n",
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tValue: \"test#test\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "replace",
"edit_start_line_idx": 27
} | package terraform
import (
"fmt"
"github.com/hashicorp/terraform/dag"
)
// GraphNodeOutput is an interface that nodes that are outputs must
// implement. The OutputName returned is the name of the output key
// that they manage.
type GraphNodeOutput interface {
OutputName() string
}
// AddOutputOrphanTransformer is a transformer that adds output orphans
// to the graph. Output orphans are outputs that are no longer in the
// configuration and therefore need to be removed from the state.
type AddOutputOrphanTransformer struct {
State *State
}
func (t *AddOutputOrphanTransformer) Transform(g *Graph) error {
// Get the state for this module. If we have no state, we have no orphans
state := t.State.ModuleByPath(g.Path)
if state == nil {
return nil
}
// Create the set of outputs we do have in the graph
found := make(map[string]struct{})
for _, v := range g.Vertices() {
on, ok := v.(GraphNodeOutput)
if !ok {
continue
}
found[on.OutputName()] = struct{}{}
}
// Go over all the outputs. If we don't have a graph node for it,
// create it. It doesn't need to depend on anything, since it's just
// setting it empty.
for k, _ := range state.Outputs {
if _, ok := found[k]; ok {
continue
}
g.Add(&graphNodeOrphanOutput{OutputName: k})
}
return nil
}
type graphNodeOrphanOutput struct {
OutputName string
}
func (n *graphNodeOrphanOutput) Name() string {
return fmt.Sprintf("output.%s (orphan)", n.OutputName)
}
func (n *graphNodeOrphanOutput) EvalTree() EvalNode {
return &EvalOpFilter{
Ops: []walkOperation{walkApply, walkDestroy, walkRefresh},
Node: &EvalDeleteOutput{
Name: n.OutputName,
},
}
}
// GraphNodeFlattenable impl.
func (n *graphNodeOrphanOutput) Flatten(p []string) (dag.Vertex, error) {
return &graphNodeOrphanOutputFlat{
graphNodeOrphanOutput: n,
PathValue: p,
}, nil
}
type graphNodeOrphanOutputFlat struct {
*graphNodeOrphanOutput
PathValue []string
}
func (n *graphNodeOrphanOutputFlat) Name() string {
return fmt.Sprintf(
"%s.%s", modulePrefixStr(n.PathValue), n.graphNodeOrphanOutput.Name())
}
func (n *graphNodeOrphanOutputFlat) EvalTree() EvalNode {
return &EvalOpFilter{
Ops: []walkOperation{walkApply, walkDestroy, walkRefresh},
Node: &EvalDeleteOutput{
Name: n.OutputName,
},
}
}
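// A minimal sketch of where AddOutputOrphanTransformer sits in practice: the
// graph builders in this package run ordered GraphTransformer steps, and the
// orphan-output step must come after the config's own output nodes have been
// added so that "found" in Transform above is complete. The placeholder
// comment stands in for the real builder steps (an assumption).
func orphanOutputStepsSketch(state *State) []GraphTransformer {
	return []GraphTransformer{
		// ... config, resource, provider and output transformers first ...
		&AddOutputOrphanTransformer{State: state},
	}
}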
| terraform/transform_output.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.0006877479027025402,
0.000328609487041831,
0.000165799239766784,
0.0002036864316323772,
0.00020555703667923808
] |
{
"id": 6,
"code_window": [
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"test123test\",\n",
"\t\t\tErrCount: 1,\n",
"\t\t},\n",
"\t\t{\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tValue: \"test#test\",\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "replace",
"edit_start_line_idx": 27
} | # Elastic IP Example
The EIP example launches a web server and installs nginx. It also creates a security group.
To run, configure your AWS provider as described in https://www.terraform.io/docs/providers/aws/index.html
Running the example
Run `terraform apply -var 'key_name={your_key_name}'`.
Give the user data a couple of minutes to install nginx, then open the Elastic IP from the outputs in your browser to see the nginx welcome page.
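When you are finished, run `terraform destroy -var 'key_name={your_key_name}'` to remove the instance, the Elastic IP and the security group (standard Terraform CLI usage, not specific to this example).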
| examples/aws-eip/README.md | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00016896598390303552,
0.0001686224713921547,
0.00016827895888127387,
0.0001686224713921547,
3.435125108808279e-7
] |
{
"id": 7,
"code_window": [
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, tc := range cases {\n",
"\t\t_, errors := validateArmLoadBalancerRuleName(tc.Value, \"azurerm_lb_rule\")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t{\n",
"\t\t\tValue: \"Test123Rule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "add",
"edit_start_line_idx": 50
} | package azurerm
import (
"fmt"
"log"
"regexp"
"time"
"github.com/Azure/azure-sdk-for-go/arm/network"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/jen20/riviera/azure"
)
func resourceArmLoadBalancerRule() *schema.Resource {
return &schema.Resource{
Create: resourceArmLoadBalancerRuleCreate,
Read: resourceArmLoadBalancerRuleRead,
Update: resourceArmLoadBalancerRuleCreate,
Delete: resourceArmLoadBalancerRuleDelete,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateArmLoadBalancerRuleName,
},
"location": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
StateFunc: azureRMNormalizeLocation,
},
"resource_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"loadbalancer_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"frontend_ip_configuration_name": {
Type: schema.TypeString,
Required: true,
},
"frontend_ip_configuration_id": {
Type: schema.TypeString,
Computed: true,
},
"backend_address_pool_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"protocol": {
Type: schema.TypeString,
Required: true,
},
"frontend_port": {
Type: schema.TypeInt,
Required: true,
},
"backend_port": {
Type: schema.TypeInt,
Required: true,
},
"probe_id": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
"enable_floating_ip": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},
"idle_timeout_in_minutes": {
Type: schema.TypeInt,
Optional: true,
Computed: true,
},
"load_distribution": {
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func resourceArmLoadBalancerRuleCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
_, _, exists = findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if exists {
return fmt.Errorf("A LoadBalancer Rule with name %q already exists.", d.Get("name").(string))
}
newLbRule, err := expandAzureRmLoadBalancerRule(d, loadBalancer)
if err != nil {
return errwrap.Wrapf("Error Exanding LoadBalancer Rule {{err}}", err)
}
lbRules := append(*loadBalancer.Properties.LoadBalancingRules, *newLbRule)
loadBalancer.Properties.LoadBalancingRules = &lbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
var rule_id string
for _, LoadBalancingRule := range *(*read.Properties).LoadBalancingRules {
if *LoadBalancingRule.Name == d.Get("name").(string) {
rule_id = *LoadBalancingRule.ID
}
}
if rule_id != "" {
d.SetId(rule_id)
} else {
return fmt.Errorf("Cannot find created LoadBalancer Rule ID %q", rule_id)
}
log.Printf("[DEBUG] Waiting for LoadBalancer (%s) to become available", loadBalancerName)
stateConf := &resource.StateChangeConf{
Pending: []string{"Accepted", "Updating"},
Target: []string{"Succeeded"},
Refresh: loadbalancerStateRefreshFunc(client, resGroup, loadBalancerName),
Timeout: 10 * time.Minute,
}
if _, err := stateConf.WaitForState(); err != nil {
return fmt.Errorf("Error waiting for LoadBalancer (%s) to become available: %s", loadBalancerName, err)
}
return resourceArmLoadBalancerRuleRead(d, meta)
}
func resourceArmLoadBalancerRuleRead(d *schema.ResourceData, meta interface{}) error {
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
log.Printf("[INFO] LoadBalancer %q not found. Removing from state", d.Get("name").(string))
return nil
}
configs := *loadBalancer.Properties.LoadBalancingRules
for _, config := range configs {
if *config.Name == d.Get("name").(string) {
d.Set("name", config.Name)
d.Set("protocol", config.Properties.Protocol)
d.Set("frontend_port", config.Properties.FrontendPort)
d.Set("backend_port", config.Properties.BackendPort)
if config.Properties.EnableFloatingIP != nil {
d.Set("enable_floating_ip", config.Properties.EnableFloatingIP)
}
if config.Properties.IdleTimeoutInMinutes != nil {
d.Set("idle_timeout_in_minutes", config.Properties.IdleTimeoutInMinutes)
}
if config.Properties.FrontendIPConfiguration != nil {
d.Set("frontend_ip_configuration_id", config.Properties.FrontendIPConfiguration.ID)
}
if config.Properties.BackendAddressPool != nil {
d.Set("backend_address_pool_id", config.Properties.BackendAddressPool.ID)
}
if config.Properties.Probe != nil {
d.Set("probe_id", config.Properties.Probe.ID)
}
if config.Properties.LoadDistribution != "" {
d.Set("load_distribution", config.Properties.LoadDistribution)
}
}
}
return nil
}
func resourceArmLoadBalancerRuleDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*ArmClient)
lbClient := client.loadBalancerClient
loadBalancer, exists, err := retrieveLoadBalancerById(d.Get("loadbalancer_id").(string), meta)
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer By ID {{err}}", err)
}
if !exists {
d.SetId("")
return nil
}
_, index, exists := findLoadBalancerRuleByName(loadBalancer, d.Get("name").(string))
if !exists {
return nil
}
oldLbRules := *loadBalancer.Properties.LoadBalancingRules
newLbRules := append(oldLbRules[:index], oldLbRules[index+1:]...)
loadBalancer.Properties.LoadBalancingRules = &newLbRules
resGroup, loadBalancerName, err := resourceGroupAndLBNameFromId(d.Get("loadbalancer_id").(string))
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer Name and Group: {{err}}", err)
}
_, err = lbClient.CreateOrUpdate(resGroup, loadBalancerName, *loadBalancer, make(chan struct{}))
if err != nil {
return errwrap.Wrapf("Error Creating/Updating LoadBalancer {{err}}", err)
}
read, err := lbClient.Get(resGroup, loadBalancerName, "")
if err != nil {
return errwrap.Wrapf("Error Getting LoadBalancer {{err}}", err)
}
if read.ID == nil {
return fmt.Errorf("Cannot read LoadBalancer %s (resource group %s) ID", loadBalancerName, resGroup)
}
return nil
}
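// findLoadBalancerRuleByName lives in this provider's shared loadbalancer
// helpers; the sketch below reconstructs its likely shape (an assumption,
// named differently to make that clear) to show how Create and Delete above
// obtain a rule, its slice index, and an existence flag.
func findLoadBalancerRuleByNameSketch(lb *network.LoadBalancer, name string) (*network.LoadBalancingRule, int, bool) {
	if lb == nil || lb.Properties == nil || lb.Properties.LoadBalancingRules == nil {
		return nil, -1, false
	}
	rules := *lb.Properties.LoadBalancingRules
	for i := range rules {
		if rules[i].Name != nil && *rules[i].Name == name {
			return &rules[i], i, true
		}
	}
	return nil, -1, false
}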
func expandAzureRmLoadBalancerRule(d *schema.ResourceData, lb *network.LoadBalancer) (*network.LoadBalancingRule, error) {
properties := network.LoadBalancingRulePropertiesFormat{
Protocol: network.TransportProtocol(d.Get("protocol").(string)),
FrontendPort: azure.Int32(int32(d.Get("frontend_port").(int))),
BackendPort: azure.Int32(int32(d.Get("backend_port").(int))),
EnableFloatingIP: azure.Bool(d.Get("enable_floating_ip").(bool)),
}
if v, ok := d.GetOk("idle_timeout_in_minutes"); ok {
properties.IdleTimeoutInMinutes = azure.Int32(int32(v.(int)))
}
if v := d.Get("load_distribution").(string); v != "" {
properties.LoadDistribution = network.LoadDistribution(v)
}
if v := d.Get("frontend_ip_configuration_name").(string); v != "" {
rule, _, exists := findLoadBalancerFrontEndIpConfigurationByName(lb, v)
if !exists {
return nil, fmt.Errorf("[ERROR] Cannot find FrontEnd IP Configuration with the name %s", v)
}
feip := network.SubResource{
ID: rule.ID,
}
properties.FrontendIPConfiguration = &feip
}
if v := d.Get("backend_address_pool_id").(string); v != "" {
beAP := network.SubResource{
ID: &v,
}
properties.BackendAddressPool = &beAP
}
if v := d.Get("probe_id").(string); v != "" {
pid := network.SubResource{
ID: &v,
}
properties.Probe = &pid
}
lbRule := network.LoadBalancingRule{
Name: azure.String(d.Get("name").(string)),
Properties: &properties,
}
return &lbRule, nil
}
func validateArmLoadBalancerRuleName(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if !regexp.MustCompile(`^[a-zA-Z._-]+$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"only word characters and hyphens allowed in %q: %q",
k, value))
}
if len(value) > 80 {
errors = append(errors, fmt.Errorf(
"%q cannot be longer than 80 characters: %q", k, value))
}
if len(value) == 0 {
errors = append(errors, fmt.Errorf(
"%q cannot be an empty string: %q", k, value))
}
if !regexp.MustCompile(`[a-zA-Z]$`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must end with a word character: %q", k, value))
}
if !regexp.MustCompile(`^[a-zA-Z]`).MatchString(value) {
errors = append(errors, fmt.Errorf(
"%q must start with a word character: %q", k, value))
}
return
}
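// Illustrative behaviour of the validator above (these mirror the unit tests
// rather than adding new behaviour):
//
//	validateArmLoadBalancerRuleName("TestRule", "name")  // no errors
//	validateArmLoadBalancerRuleName("test#test", "name") // one error: '#' is outside the allowed character class
//	validateArmLoadBalancerRuleName("", "name")          // several errors: empty, and every pattern check fails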
| builtin/providers/azurerm/resource_arm_loadbalancer_rule.go | 1 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.9864048957824707,
0.14316783845424652,
0.00016547154518775642,
0.000393453286960721,
0.3300861418247223
] |
{
"id": 7,
"code_window": [
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, tc := range cases {\n",
"\t\t_, errors := validateArmLoadBalancerRuleName(tc.Value, \"azurerm_lb_rule\")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t{\n",
"\t\t\tValue: \"Test123Rule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "add",
"edit_start_line_idx": 50
} | // //+build ignore
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED from gen-helper.go.tmpl
// ************************************************************
package codec
import (
"encoding"
"reflect"
)
// This file is used to generate helper code for codecgen.
// The values here i.e. genHelper(En|De)coder are not to be used directly by
// library users. They WILL change continuously and without notice.
//
// To help enforce this, we create an unexported type with exported members.
// The only way to get the type is via the one exported type that we control (somewhat).
//
// When static codecs are created for types, they will use this value
// to perform encoding or decoding of primitives or known slice or map types.
// GenHelperEncoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperEncoder(e *Encoder) (genHelperEncoder, encDriver) {
return genHelperEncoder{e:e}, e.e
}
// GenHelperDecoder is exported so that it can be used externally by codecgen.
// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
func GenHelperDecoder(d *Decoder) (genHelperDecoder, decDriver) {
return genHelperDecoder{d:d}, d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperEncoder struct {
e *Encoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
type genHelperDecoder struct {
d *Decoder
F fastpathT
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
return f.e.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinary() bool {
return f.e.be // f.e.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncFallback(iv interface{}) {
// println(">>>>>>>>> EncFallback")
f.e.encodeI(iv, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) {
bs, fnerr := iv.MarshalText()
f.e.marshal(bs, fnerr, false, c_UTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) {
bs, fnerr := iv.MarshalJSON()
f.e.marshal(bs, fnerr, true, c_UTF8)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) {
bs, fnerr := iv.MarshalBinary()
f.e.marshal(bs, fnerr, false, c_RAW)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) TimeRtidIfBinc() uintptr {
if _, ok := f.e.hh.(*BincHandle); ok {
return timeTypId
}
return 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) IsJSONHandle() bool {
return f.e.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) HasExtensions() bool {
return len(f.e.h.extHandle) != 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v)
if rt.Kind() == reflect.Ptr {
rt = rt.Elem()
}
rtid := reflect.ValueOf(rt).Pointer()
if xfFn := f.e.h.getExt(rtid); xfFn != nil {
f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e)
return true
}
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncSendContainerState(c containerState) {
if f.e.cr != nil {
f.e.cr.sendContainerState(c)
}
}
// ---------------- DECODER FOLLOWS -----------------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBasicHandle() *BasicHandle {
return f.d.h
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinary() bool {
return f.d.be // f.d.hh.isBinaryEncoding()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSwallow() {
f.d.swallow()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecScratchBuffer() []byte {
return f.d.b[:]
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) {
// println(">>>>>>>>> DecFallback")
f.d.decodeI(iv, chkPtr, false, false, false)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) {
return f.d.decSliceHelperStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) {
f.d.structFieldNotFound(index, name)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) {
f.d.arrayCannotExpand(sliceLen, streamLen)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) {
fnerr := tm.UnmarshalText(f.d.d.DecodeBytes(f.d.b[:], true, true))
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) {
// bs := f.dd.DecodeBytes(f.d.b[:], true, true)
// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
fnerr := tm.UnmarshalJSON(f.d.nextValueBytes())
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) {
fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, false, true))
if fnerr != nil {
panic(fnerr)
}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) TimeRtidIfBinc() uintptr {
if _, ok := f.d.hh.(*BincHandle); ok {
return timeTypId
}
return 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) IsJSONHandle() bool {
return f.d.js
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) HasExtensions() bool {
return len(f.d.h.extHandle) != 0
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecExt(v interface{}) (r bool) {
rt := reflect.TypeOf(v).Elem()
rtid := reflect.ValueOf(rt).Pointer()
if xfFn := f.d.h.getExt(rtid); xfFn != nil {
f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext)
return true
}
return false
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int, truncated bool) {
return decInferLen(clen, maxlen, unit)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecSendContainerState(c containerState) {
if f.d.cr != nil {
f.d.cr.sendContainerState(c)
}
}
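// A sketch of how codecgen-generated code consumes the helpers above; the
// generated method and type names vary per type, so the shape below is an
// assumption rather than real generated output:
//
//	func (x *MyType) CodecEncodeSelf(e *Encoder) {
//		h, ee := GenHelperEncoder(e)
//		if h.EncBinary() {
//			ee.EncodeStringBytes(c_RAW, x.Raw) // binary-format path
//		} else {
//			ee.EncodeString(c_UTF8, x.Name) // text-format path
//		}
//	}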
{{/*
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncDriver() encDriver {
return f.e.e
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecDriver() decDriver {
return f.d.d
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncNil() {
f.e.e.EncodeNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncBytes(v []byte) {
f.e.e.EncodeStringBytes(c_RAW, v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayStart(length int) {
f.e.e.EncodeArrayStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEnd() {
f.e.e.EncodeArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncArrayEntrySeparator() {
f.e.e.EncodeArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapStart(length int) {
f.e.e.EncodeMapStart(length)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEnd() {
f.e.e.EncodeMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapEntrySeparator() {
f.e.e.EncodeMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) EncMapKVSeparator() {
f.e.e.EncodeMapKVSeparator()
}
// ---------
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecBytes(v *[]byte) {
*v = f.d.d.DecodeBytes(*v)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecTryNil() bool {
return f.d.d.TryDecodeAsNil()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsNil() (b bool) {
return f.d.d.IsContainerType(valueTypeNil)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsMap() (b bool) {
return f.d.d.IsContainerType(valueTypeMap)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecContainerIsArray() (b bool) {
return f.d.d.IsContainerType(valueTypeArray)
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecCheckBreak() bool {
return f.d.d.CheckBreak()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapStart() int {
return f.d.d.ReadMapStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayStart() int {
return f.d.d.ReadArrayStart()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEnd() {
f.d.d.ReadMapEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEnd() {
f.d.d.ReadArrayEnd()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecArrayEntrySeparator() {
f.d.d.ReadArrayEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapEntrySeparator() {
f.d.d.ReadMapEntrySeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) DecMapKVSeparator() {
f.d.d.ReadMapKVSeparator()
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) ReadStringAsBytes(bs []byte) []byte {
return f.d.d.DecodeStringAsBytes(bs)
}
// -- encode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" true }}(v {{ .Primitive }}) {
ee := f.e.e
{{ encmd .Primitive "v" }}
}
{{ end }}{{ end }}{{ end }}
// -- decode calls (primitives)
{{range .Values}}{{if .Primitive }}{{if ne .Primitive "interface{}" }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Dec" true }}(vp *{{ .Primitive }}) {
dd := f.d.d
*vp = {{ decmd .Primitive }}
}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperDecoder) {{ .MethodNamePfx "Read" true }}() (v {{ .Primitive }}) {
dd := f.d.d
v = {{ decmd .Primitive }}
return
}
{{ end }}{{ end }}{{ end }}
// -- encode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}{{if .Slice }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v []{{ .Elem }}) { {{ else }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
func (f genHelperEncoder) {{ .MethodNamePfx "Enc" false }}(v map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
f.F.{{ .MethodNamePfx "Enc" false }}V(v, false, f.e)
}
{{ end }}{{ end }}
// -- decode calls (slices/maps)
{{range .Values}}{{if not .Primitive }}
// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
{{if .Slice }}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *[]{{ .Elem }}) {
{{else}}func (f genHelperDecoder) {{ .MethodNamePfx "Dec" false }}(vp *map[{{ .MapKey }}]{{ .Elem }}) { {{end}}
v, changed := f.F.{{ .MethodNamePfx "Dec" false }}V(*vp, false, true, f.d)
if changed {
*vp = v
}
}
{{ end }}{{ end }}
*/}}
| vendor/github.com/coreos/etcd/Godeps/_workspace/src/github.com/ugorji/go/codec/gen-helper.go.tmpl | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.0010482145007699728,
0.00019793880346696824,
0.00016236951341852546,
0.00017057520744856447,
0.00014452592586167157
] |
{
"id": 7,
"code_window": [
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, tc := range cases {\n",
"\t\t_, errors := validateArmLoadBalancerRuleName(tc.Value, \"azurerm_lb_rule\")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t{\n",
"\t\t\tValue: \"Test123Rule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "add",
"edit_start_line_idx": 50
} | package native
import (
"github.com/ziutek/mymysql/mysql"
"log"
)
type Stmt struct {
my *Conn
id uint32
sql string // For reprepare during reconnect
params []paramValue // Parameters binding
rebind bool
binded bool
fields []*mysql.Field
field_count int
param_count int
warning_count int
status mysql.ConnStatus
null_bitmap []byte
}
func (stmt *Stmt) Fields() []*mysql.Field {
return stmt.fields
}
func (stmt *Stmt) NumParam() int {
return stmt.param_count
}
func (stmt *Stmt) WarnCount() int {
return stmt.warning_count
}
func (stmt *Stmt) sendCmdExec() {
// Calculate packet length and NULL bitmap
pkt_len := 1 + 4 + 1 + 4 + 1 + len(stmt.null_bitmap)
for ii := range stmt.null_bitmap {
stmt.null_bitmap[ii] = 0
}
for ii, param := range stmt.params {
par_len := param.Len()
pkt_len += par_len
if par_len == 0 {
null_byte := ii >> 3
null_mask := byte(1) << uint(ii-(null_byte<<3))
stmt.null_bitmap[null_byte] |= null_mask
}
}
if stmt.rebind {
pkt_len += stmt.param_count * 2
}
// Reset sequence number
stmt.my.seq = 0
// Packet sending
pw := stmt.my.newPktWriter(pkt_len)
pw.writeByte(_COM_STMT_EXECUTE)
pw.writeU32(stmt.id)
pw.writeByte(0) // flags = CURSOR_TYPE_NO_CURSOR
pw.writeU32(1) // iteration_count
pw.write(stmt.null_bitmap)
if stmt.rebind {
pw.writeByte(1)
// Types
for _, param := range stmt.params {
pw.writeU16(param.typ)
}
} else {
pw.writeByte(0)
}
// Values
for i := range stmt.params {
pw.writeValue(&stmt.params[i])
}
if stmt.my.Debug {
log.Printf("[%2d <-] Exec command packet: len=%d, null_bitmap=%v, rebind=%t",
stmt.my.seq-1, pkt_len, stmt.null_bitmap, stmt.rebind)
}
	// Mark that we have sent information about bound types
stmt.rebind = false
}
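// Worked example of the NULL-bitmap packing above (illustrative values): with
// nine bound parameters where parameters 0 and 8 are NULL, ii=0 gives
// null_byte=0 and null_mask=0b00000001, while ii=8 gives null_byte=1 and the
// same mask, so stmt.null_bitmap becomes []byte{0x01, 0x01}. Each byte covers
// eight parameters, least-significant bit first, per the MySQL binary
// protocol.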
func (my *Conn) getPrepareResult(stmt *Stmt) interface{} {
loop:
pr := my.newPktReader() // New reader for next packet
pkt0 := pr.readByte()
//log.Println("pkt0:", pkt0, "stmt:", stmt)
if pkt0 == 255 {
// Error packet
my.getErrorPacket(pr)
}
if stmt == nil {
if pkt0 == 0 {
// OK packet
return my.getPrepareOkPacket(pr)
}
} else {
unreaded_params := (stmt.param_count < len(stmt.params))
switch {
case pkt0 == 254:
// EOF packet
stmt.warning_count, stmt.status = my.getEofPacket(pr)
stmt.my.status = stmt.status
return stmt
case pkt0 > 0 && pkt0 < 251 && (stmt.field_count < len(stmt.fields) ||
unreaded_params):
// Field packet
if unreaded_params {
// Read and ignore parameter field. Sentence from MySQL source:
/* skip parameters data: we don't support it yet */
pr.skipAll()
// Increment param_count count
stmt.param_count++
} else {
field := my.getFieldPacket(pr)
stmt.fields[stmt.field_count] = field
// Increment field count
stmt.field_count++
}
// Read next packet
goto loop
}
}
panic(mysql.ErrUnkResultPkt)
}
func (my *Conn) getPrepareOkPacket(pr *pktReader) (stmt *Stmt) {
if my.Debug {
log.Printf("[%2d ->] Perpared OK packet:", my.seq-1)
}
stmt = new(Stmt)
stmt.my = my
	// First byte was already read by getPrepareResult
stmt.id = pr.readU32()
stmt.fields = make([]*mysql.Field, int(pr.readU16())) // FieldCount
pl := int(pr.readU16()) // ParamCount
if pl > 0 {
stmt.params = make([]paramValue, pl)
stmt.null_bitmap = make([]byte, (pl+7)>>3)
}
pr.skipN(1)
stmt.warning_count = int(pr.readU16())
pr.checkEof()
if my.Debug {
log.Printf(tab8s+"ID=0x%x ParamCount=%d FieldsCount=%d WarnCount=%d",
stmt.id, len(stmt.params), len(stmt.fields), stmt.warning_count,
)
}
return
}
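// Illustrative decode for the function above (values made up): a payload of
// [0x00][id:4][field_count:2][param_count:2][filler:1][warning_count:2] with
// field_count=2 and param_count=1 yields len(stmt.fields)==2, one paramValue
// slot, and a one-byte NULL bitmap, since (1+7)>>3 == 1.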
| vendor/github.com/ziutek/mymysql/native/prepared.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00038925878470763564,
0.000185423152288422,
0.0001680347486399114,
0.0001730253134155646,
0.00005103396688355133
] |
{
"id": 7,
"code_window": [
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t}\n",
"\n",
"\tfor _, tc := range cases {\n",
"\t\t_, errors := validateArmLoadBalancerRuleName(tc.Value, \"azurerm_lb_rule\")\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t{\n",
"\t\t\tValue: \"Test123Rule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n",
"\t\t{\n",
"\t\t\tValue: \"TestRule\",\n",
"\t\t\tErrCount: 0,\n",
"\t\t},\n"
],
"file_path": "builtin/providers/azurerm/resource_arm_loadbalancer_rule_test.go",
"type": "add",
"edit_start_line_idx": 50
} | // Copyright 2013 Beego Authors
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package session
import (
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"sync"
"time"
"github.com/Unknwon/com"
)
// FileStore represents a file session store implementation.
type FileStore struct {
p *FileProvider
sid string
lock sync.RWMutex
data map[interface{}]interface{}
}
// NewFileStore creates and returns a file session store.
func NewFileStore(p *FileProvider, sid string, kv map[interface{}]interface{}) *FileStore {
return &FileStore{
p: p,
sid: sid,
data: kv,
}
}
// Set sets value to given key in session.
func (s *FileStore) Set(key, val interface{}) error {
s.lock.Lock()
defer s.lock.Unlock()
s.data[key] = val
return nil
}
// Get gets value by given key in session.
func (s *FileStore) Get(key interface{}) interface{} {
s.lock.RLock()
defer s.lock.RUnlock()
return s.data[key]
}
// Delete deletes a key from session.
func (s *FileStore) Delete(key interface{}) error {
s.lock.Lock()
defer s.lock.Unlock()
delete(s.data, key)
return nil
}
// ID returns current session ID.
func (s *FileStore) ID() string {
return s.sid
}
// Release releases resources and saves data to the provider.
func (s *FileStore) Release() error {
s.p.lock.Lock()
defer s.p.lock.Unlock()
data, err := EncodeGob(s.data)
if err != nil {
return err
}
return ioutil.WriteFile(s.p.filepath(s.sid), data, os.ModePerm)
}
// Flush deletes all session data.
func (s *FileStore) Flush() error {
s.lock.Lock()
defer s.lock.Unlock()
s.data = make(map[interface{}]interface{})
return nil
}
// FileProvider represents a file session provider implementation.
type FileProvider struct {
lock sync.RWMutex
maxlifetime int64
rootPath string
}
// Init initializes file session provider with given root path.
func (p *FileProvider) Init(maxlifetime int64, rootPath string) error {
p.maxlifetime = maxlifetime
p.rootPath = rootPath
return nil
}
func (p *FileProvider) filepath(sid string) string {
return path.Join(p.rootPath, string(sid[0]), string(sid[1]), sid)
}
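// For example (the session ID below is made up): filepath("d1efa8...") maps
// to <rootPath>/d/1/d1efa8..., so the first two characters of the ID shard
// session files across subdirectories and keep any single directory small.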
// Read returns raw session store by session ID.
func (p *FileProvider) Read(sid string) (_ RawStore, err error) {
filename := p.filepath(sid)
if err = os.MkdirAll(path.Dir(filename), os.ModePerm); err != nil {
return nil, err
}
p.lock.RLock()
defer p.lock.RUnlock()
var f *os.File
if com.IsFile(filename) {
f, err = os.OpenFile(filename, os.O_RDWR, os.ModePerm)
} else {
f, err = os.Create(filename)
}
if err != nil {
return nil, err
}
defer f.Close()
if err = os.Chtimes(filename, time.Now(), time.Now()); err != nil {
return nil, err
}
var kv map[interface{}]interface{}
data, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
if len(data) == 0 {
kv = make(map[interface{}]interface{})
} else {
kv, err = DecodeGob(data)
if err != nil {
return nil, err
}
}
return NewFileStore(p, sid, kv), nil
}
// Exist returns true if session with given ID exists.
func (p *FileProvider) Exist(sid string) bool {
p.lock.RLock()
defer p.lock.RUnlock()
return com.IsFile(p.filepath(sid))
}
// Destory deletes a session by session ID.
func (p *FileProvider) Destory(sid string) error {
p.lock.Lock()
defer p.lock.Unlock()
return os.Remove(p.filepath(sid))
}
func (p *FileProvider) regenerate(oldsid, sid string) (err error) {
p.lock.Lock()
defer p.lock.Unlock()
filename := p.filepath(sid)
if com.IsExist(filename) {
return fmt.Errorf("new sid '%s' already exists", sid)
}
oldname := p.filepath(oldsid)
if !com.IsFile(oldname) {
data, err := EncodeGob(make(map[interface{}]interface{}))
if err != nil {
return err
}
if err = os.MkdirAll(path.Dir(oldname), os.ModePerm); err != nil {
return err
}
if err = ioutil.WriteFile(oldname, data, os.ModePerm); err != nil {
return err
}
}
if err = os.MkdirAll(path.Dir(filename), os.ModePerm); err != nil {
return err
}
if err = os.Rename(oldname, filename); err != nil {
return err
}
return nil
}
// Regenerate regenerates a session store from old session ID to new one.
func (p *FileProvider) Regenerate(oldsid, sid string) (_ RawStore, err error) {
if err := p.regenerate(oldsid, sid); err != nil {
return nil, err
}
return p.Read(sid)
}
// Count counts and returns number of sessions.
func (p *FileProvider) Count() int {
count := 0
if err := filepath.Walk(p.rootPath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.IsDir() {
count++
}
return nil
}); err != nil {
log.Printf("error counting session files: %v", err)
return 0
}
return count
}
// GC calls GC to clean expired sessions.
func (p *FileProvider) GC() {
if !com.IsExist(p.rootPath) {
return
}
p.lock.Lock()
defer p.lock.Unlock()
if err := filepath.Walk(p.rootPath, func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if !fi.IsDir() &&
(fi.ModTime().Unix()+p.maxlifetime) < time.Now().Unix() {
return os.Remove(path)
}
return nil
}); err != nil {
log.Printf("error garbage collecting session files: %v", err)
}
}
func init() {
Register("file", &FileProvider{})
}
| vendor/github.com/macaron-contrib/session/file.go | 0 | https://github.com/hashicorp/terraform/commit/957d1cb3a87cf370c65988315bc203f5f6df8d44 | [
0.00027884618612006307,
0.00017702228797134012,
0.00016576447524130344,
0.00017349251720588654,
0.000020803190636797808
] |
{
"id": 0,
"code_window": [
"\n",
"\t// Wire up the internal provisioners first. These might be overridden\n",
"\t// by discovered provisioners below.\n",
"\tfor name := range InternalProvisioners {\n",
"\t\tclient, err := internalPluginClient(\"provisioner\", name)\n",
"\t\tif err != nil {\n",
"\t\t\tlog.Printf(\"[WARN] failed to build command line for internal plugin %q: %s\", name, err)\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tfactories[name] = internalProvisionerFactory(client)\n",
"\t}\n",
"\n",
"\tbyName := plugins.ByName()\n",
"\tfor name, metas := range byName {\n",
"\t\t// Since we validated versions above and we partitioned the sets\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfactories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name})\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 328
} | package command
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
plugin "github.com/hashicorp/go-plugin"
"github.com/kardianos/osext"
terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform"
tfplugin "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/plugin/discovery"
"github.com/hashicorp/terraform/providers"
"github.com/hashicorp/terraform/provisioners"
"github.com/hashicorp/terraform/terraform"
)
// multiVersionProviderResolver is an implementation of
// terraform.ResourceProviderResolver that matches the given version constraints
// against a set of versioned provider plugins to find the newest version of
// each that satisfies the given constraints.
type multiVersionProviderResolver struct {
Available discovery.PluginMetaSet
// Internal is a map that overrides the usual plugin selection process
// for internal plugins. These plugins do not support version constraints
// (will produce an error if one is set). This should be used only in
// exceptional circumstances since it forces the provider's release
// schedule to be tied to that of Terraform Core.
Internal map[string]providers.Factory
}
func choosePlugins(avail discovery.PluginMetaSet, internal map[string]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta {
candidates := avail.ConstrainVersions(reqd)
ret := map[string]discovery.PluginMeta{}
for name, metas := range candidates {
// If the provider is in our internal map then we ignore any
// discovered plugins for it since these are dealt with separately.
if _, isInternal := internal[name]; isInternal {
continue
}
if len(metas) == 0 {
continue
}
ret[name] = metas.Newest()
}
return ret
}
func (r *multiVersionProviderResolver) ResolveProviders(
reqd discovery.PluginRequirements,
) (map[string]providers.Factory, []error) {
factories := make(map[string]providers.Factory, len(reqd))
var errs []error
chosen := choosePlugins(r.Available, r.Internal, reqd)
for name, req := range reqd {
if factory, isInternal := r.Internal[name]; isInternal {
if !req.Versions.Unconstrained() {
errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name))
continue
}
factories[name] = factory
continue
}
if newest, available := chosen[name]; available {
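// Verify the plugin binary against the SHA256 digests recorded when it was installed; a new or changed executable is rejected below.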
digest, err := newest.SHA256()
if err != nil {
errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err))
continue
}
if !reqd[name].AcceptsSHA256(digest) {
errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name))
continue
}
factories[name] = providerFactory(newest)
} else {
msg := fmt.Sprintf("provider.%s: no suitable version installed", name)
required := req.Versions.String()
// an empty requirement string means the version is unconstrained
if required == "" {
required = "(any version)"
}
foundVersions := []string{}
for meta := range r.Available.WithName(name) {
foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version))
}
found := "none"
if len(foundVersions) > 0 {
found = strings.Join(foundVersions, ", ")
}
msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found)
errs = append(errs, errors.New(msg))
}
}
return factories, errs
}
// store the user-supplied path for plugin discovery
func (m *Meta) storePluginPath(pluginPath []string) error {
if len(pluginPath) == 0 {
return nil
}
path := filepath.Join(m.DataDir(), PluginPathFile)
// remove the plugin dir record if the path was set to an empty string
if len(pluginPath) == 1 && (pluginPath[0] == "") {
err := os.Remove(path)
if !os.IsNotExist(err) {
return err
}
return nil
}
js, err := json.MarshalIndent(pluginPath, "", " ")
if err != nil {
return err
}
// if this fails, so will WriteFile
os.MkdirAll(m.DataDir(), 0755)
return ioutil.WriteFile(path, js, 0644)
}
// Load the user-defined plugin search path into Meta.pluginPath if the file
// exists.
func (m *Meta) loadPluginPath() ([]string, error) {
js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile))
if os.IsNotExist(err) {
return nil, nil
}
if err != nil {
return nil, err
}
var pluginPath []string
if err := json.Unmarshal(js, &pluginPath); err != nil {
return nil, err
}
return pluginPath, nil
}
// the default location for automatically installed plugins
func (m *Meta) pluginDir() string {
return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH))
}
// pluginDirs return a list of directories to search for plugins.
//
// Earlier entries in this slice get priority over later when multiple copies
// of the same plugin version are found, but newer versions always override
// older versions where both satisfy the provider version constraints.
func (m *Meta) pluginDirs(includeAutoInstalled bool) []string {
// user defined paths take precedence
if len(m.pluginPath) > 0 {
return m.pluginPath
}
// When searching the following directories, earlier entries get precedence
// if the same plugin version is found twice, but newer versions will
// always get preference below regardless of where they are coming from.
// TODO: Add auto-install dir, default vendor dir and optional override
// vendor dir(s).
dirs := []string{"."}
// Look in the same directory as the Terraform executable.
// If found, this replaces what we found in the config path.
exePath, err := osext.Executable()
if err != nil {
log.Printf("[ERROR] Error discovering exe directory: %s", err)
} else {
dirs = append(dirs, filepath.Dir(exePath))
}
// add the user vendor directory
dirs = append(dirs, DefaultPluginVendorDir)
if includeAutoInstalled {
dirs = append(dirs, m.pluginDir())
}
dirs = append(dirs, m.GlobalPluginDirs...)
return dirs
}
func (m *Meta) pluginCache() discovery.PluginCache {
dir := m.PluginCacheDir
if dir == "" {
return nil // cache disabled
}
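// Scope the cache by platform so cached binaries are never shared across OS/architecture combinations.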
dir = filepath.Join(dir, pluginMachineName)
return discovery.NewLocalPluginCache(dir)
}
// providerPluginSet returns the set of valid providers that were discovered in
// the defined search paths.
func (m *Meta) providerPluginSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(true))
// Add providers defined in the legacy .terraformrc.
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
// providerPluginAutoInstalledSet returns the set of providers that exist
// within the auto-install directory.
func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", []string{m.pluginDir()})
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q", p.Name)
}
return plugins
}
// providerPluginManuallyInstalledSet returns the set of providers that exist
// in all locations *except* the auto-install directory.
func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet {
plugins := discovery.FindPlugins("provider", m.pluginDirs(false))
// Add providers defined in the legacy .terraformrc.
if m.PluginOverrides != nil {
for k, v := range m.PluginOverrides.Providers {
log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v)
}
plugins = plugins.OverridePaths(m.PluginOverrides.Providers)
}
plugins, _ = plugins.ValidateVersions()
for p := range plugins {
log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path)
}
return plugins
}
func (m *Meta) providerResolver() providers.Resolver {
return &multiVersionProviderResolver{
Available: m.providerPluginSet(),
Internal: m.internalProviders(),
}
}
func (m *Meta) internalProviders() map[string]providers.Factory {
return map[string]providers.Factory{
"terraform": func() (providers.Interface, error) {
return terraformProvider.NewProvider(), nil
},
}
}
// filter the requirements, returning only the providers that we can't resolve
func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements {
missing := make(discovery.PluginRequirements)
candidates := avail.ConstrainVersions(reqd)
internal := m.internalProviders()
for name, versionSet := range reqd {
// internal providers can't be missing
if _, ok := internal[name]; ok {
continue
}
log.Printf("[DEBUG] plugin requirements: %q=%q", name, versionSet.Versions)
if metas := candidates[name]; metas.Count() == 0 {
missing[name] = versionSet
}
}
return missing
}
func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory {
dirs := m.pluginDirs(true)
plugins := discovery.FindPlugins("provisioner", dirs)
plugins, _ = plugins.ValidateVersions()
// For now our goal is to just find the latest version of each plugin
// we have on the system. All provisioners should be at version 0.0.0
// currently, so there should actually only be one instance of each plugin
// name here, even though the discovery interface forces us to pretend
// that might not be true.
factories := make(map[string]terraform.ProvisionerFactory)
// Wire up the internal provisioners first. These might be overridden
// by discovered provisioners below.
for name := range InternalProvisioners {
client, err := internalPluginClient("provisioner", name)
if err != nil {
log.Printf("[WARN] failed to build command line for internal plugin %q: %s", name, err)
continue
}
factories[name] = internalProvisionerFactory(client)
}
byName := plugins.ByName()
for name, metas := range byName {
// Since we validated versions above and we partitioned the sets
// by name, we're guaranteed that the metas in our set all have
// valid versions and that there's at least one meta.
newest := metas.Newest()
factories[name] = provisionerFactory(newest)
}
return factories
}
func internalPluginClient(kind, name string) (*plugin.Client, error) {
cmdLine, err := BuildPluginCommandString(kind, name)
if err != nil {
return nil, err
}
// See the docstring for BuildPluginCommandString for why we need to do
// this split here.
cmdArgv := strings.Split(cmdLine, TFSPACE)
cfg := &plugin.ClientConfig{
Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...),
HandshakeConfig: tfplugin.Handshake,
Managed: true,
VersionedPlugins: tfplugin.VersionedPlugins,
AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
}
return plugin.NewClient(cfg), nil
}
func providerFactory(meta discovery.PluginMeta) providers.Factory {
return func() (providers.Interface, error) {
client := tfplugin.Client(meta)
// Request the RPC client so we can get the provider
// and build the actual RPC-implemented provider.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvider)
p.PluginClient = client
return p, nil
}
}
func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
client := tfplugin.Client(meta)
return newProvisionerClient(client)
}
}
func internalProvisionerFactory(client *plugin.Client) terraform.ProvisionerFactory {
return func() (provisioners.Interface, error) {
return newProvisionerClient(client)
}
}
func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) {
// Request the RPC client so we can get the provisioner
// and build the actual RPC-implemented provisioner.
rpcClient, err := client.Client()
if err != nil {
return nil, err
}
raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName)
if err != nil {
return nil, err
}
// store the client so that the plugin can kill the child process
p := raw.(*tfplugin.GRPCProvisioner)
p.PluginClient = client
return p, nil
}
| command/plugins.go | 1 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.9981539845466614,
0.0926833301782608,
0.00016232355847023427,
0.0012709777802228928,
0.28301557898521423
] |
{
"id": 0,
"code_window": [
"\n",
"\t// Wire up the internal provisioners first. These might be overridden\n",
"\t// by discovered provisioners below.\n",
"\tfor name := range InternalProvisioners {\n",
"\t\tclient, err := internalPluginClient(\"provisioner\", name)\n",
"\t\tif err != nil {\n",
"\t\t\tlog.Printf(\"[WARN] failed to build command line for internal plugin %q: %s\", name, err)\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tfactories[name] = internalProvisionerFactory(client)\n",
"\t}\n",
"\n",
"\tbyName := plugins.ByName()\n",
"\tfor name, metas := range byName {\n",
"\t\t// Since we validated versions above and we partitioned the sets\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfactories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name})\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 328
} | variable "should_ask" {}
provider "test" {
value = "${var.should_ask}"
}
resource "test_instance" "foo" {
foo = "bar"
}
| backend/local/test-fixtures/refresh-var-unset/main.tf | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017438943905290216,
0.00017438943905290216,
0.00017438943905290216,
0.00017438943905290216,
0
] |
{
"id": 0,
"code_window": [
"\n",
"\t// Wire up the internal provisioners first. These might be overridden\n",
"\t// by discovered provisioners below.\n",
"\tfor name := range InternalProvisioners {\n",
"\t\tclient, err := internalPluginClient(\"provisioner\", name)\n",
"\t\tif err != nil {\n",
"\t\t\tlog.Printf(\"[WARN] failed to build command line for internal plugin %q: %s\", name, err)\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tfactories[name] = internalProvisionerFactory(client)\n",
"\t}\n",
"\n",
"\tbyName := plugins.ByName()\n",
"\tfor name, metas := range byName {\n",
"\t\t// Since we validated versions above and we partitioned the sets\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfactories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name})\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 328
} | package volumes
import (
"github.com/gophercloud/gophercloud"
)
// WaitForStatus continually polls the resource, checking for a particular
// status. It gives up after the given number of seconds.
func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
return gophercloud.WaitFor(secs, func() (bool, error) {
current, err := Get(c, id).Extract()
if err != nil {
return false, err
}
if current.Status == status {
return true, nil
}
return false, nil
})
}
| vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017438246868550777,
0.00017236785788554698,
0.00017015218327287585,
0.00017256892169825733,
0.0000017328490002910257
] |
{
"id": 0,
"code_window": [
"\n",
"\t// Wire up the internal provisioners first. These might be overridden\n",
"\t// by discovered provisioners below.\n",
"\tfor name := range InternalProvisioners {\n",
"\t\tclient, err := internalPluginClient(\"provisioner\", name)\n",
"\t\tif err != nil {\n",
"\t\t\tlog.Printf(\"[WARN] failed to build command line for internal plugin %q: %s\", name, err)\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tfactories[name] = internalProvisionerFactory(client)\n",
"\t}\n",
"\n",
"\tbyName := plugins.ByName()\n",
"\tfor name, metas := range byName {\n",
"\t\t// Since we validated versions above and we partitioned the sets\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tfactories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name})\n"
],
"file_path": "command/plugins.go",
"type": "replace",
"edit_start_line_idx": 328
} | package intfns
import (
"fmt"
"math"
"github.com/ChrisTrenkamp/goxpath/tree"
)
func number(c tree.Ctx, args ...tree.Result) (tree.Result, error) {
if b, ok := args[0].(tree.IsNum); ok {
return b.Num(), nil
}
return nil, fmt.Errorf("Cannot convert object to a number")
}
func sum(c tree.Ctx, args ...tree.Result) (tree.Result, error) {
n, ok := args[0].(tree.NodeSet)
if !ok {
return nil, fmt.Errorf("Cannot convert object to a node-set")
}
ret := 0.0
for _, i := range n {
ret += float64(tree.GetNodeNum(i))
}
return tree.Num(ret), nil
}
func floor(c tree.Ctx, args ...tree.Result) (tree.Result, error) {
n, ok := args[0].(tree.IsNum)
if !ok {
return nil, fmt.Errorf("Cannot convert object to a number")
}
return tree.Num(math.Floor(float64(n.Num()))), nil
}
func ceiling(c tree.Ctx, args ...tree.Result) (tree.Result, error) {
n, ok := args[0].(tree.IsNum)
if !ok {
return nil, fmt.Errorf("Cannot convert object to a number")
}
return tree.Num(math.Ceil(float64(n.Num()))), nil
}
func round(c tree.Ctx, args ...tree.Result) (tree.Result, error) {
isn, ok := args[0].(tree.IsNum)
if !ok {
return nil, fmt.Errorf("Cannot convert object to a number")
}
n := isn.Num()
if math.IsNaN(float64(n)) || math.IsInf(float64(n), 0) {
return n, nil
}
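// Round half away from zero outside [-0.5, 0.5]; values inside that interval collapse to 0.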
if n < -0.5 {
n = tree.Num(int(n - 0.5))
} else if n > 0.5 {
n = tree.Num(int(n + 0.5))
} else {
n = 0
}
return n, nil
}
| vendor/github.com/ChrisTrenkamp/goxpath/internal/parser/intfns/numfns.go | 0 | https://github.com/hashicorp/terraform/commit/52c0032aedf1c3355ffbec1e566a2b0539ae019e | [
0.00017643094179220498,
0.0001749378425301984,
0.00017246034985873848,
0.00017516124353278428,
0.0000013777128060610266
] |