hunk (dict) | file (stringlengths 0-11.8M) | file_path (stringlengths 2-234) | label (int64 0-1) | commit_url (stringlengths 74-103) | dependency_score (sequencelengths 5-5) |
---|---|---|---|---|---|
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n",
"\t\tif err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 342
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded from, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// decodeContinue transforms an encoded continue token into a start key and resource version.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
// getResp.Count counts in objects that do not match the pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty maxCapacity will be used as the size to grow.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
// decode decodes value of bytes into object. It will also set the object resource version to rev.
// On success, objPtr would be set to the object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9975804090499878,
0.06794591248035431,
0.00016094969760160893,
0.0001781554165063426,
0.23265129327774048
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n",
"\t\tif err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 342
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"fake_apiregistration_client.go",
"fake_apiservice.go",
],
importmap = "k8s.io/kubernetes/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/typed/apiregistration/v1/fake",
importpath = "k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/typed/apiregistration/v1/fake",
visibility = ["//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated:__subpackages__"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/typed/apiregistration/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/deprecated/typed/apiregistration/v1/fake/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017676602874416858,
0.00017551386554259807,
0.0001724042958812788,
0.0001764425542205572,
0.0000018006833215622464
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n",
"\t\tif err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 342
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1beta1
import (
"context"
v1beta1 "k8s.io/api/authorization/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface.
// A group's client should implement this interface.
type SelfSubjectAccessReviewsGetter interface {
SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface
}
// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources.
type SelfSubjectAccessReviewInterface interface {
Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (*v1beta1.SelfSubjectAccessReview, error)
SelfSubjectAccessReviewExpansion
}
// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface
type selfSubjectAccessReviews struct {
client rest.Interface
}
// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews
func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews {
return &selfSubjectAccessReviews{
client: c.RESTClient(),
}
}
// Create takes the representation of a selfSubjectAccessReview and creates it. Returns the server's representation of the selfSubjectAccessReview, and an error, if there is any.
func (c *selfSubjectAccessReviews) Create(ctx context.Context, selfSubjectAccessReview *v1beta1.SelfSubjectAccessReview, opts v1.CreateOptions) (result *v1beta1.SelfSubjectAccessReview, err error) {
result = &v1beta1.SelfSubjectAccessReview{}
err = c.client.Post().
Resource("selfsubjectaccessreviews").
VersionedParams(&opts, scheme.ParameterCodec).
Body(selfSubjectAccessReview).
Do(ctx).
Into(result)
return
}
| staging/src/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00019462672935333103,
0.00017696154827717692,
0.00016865998622961342,
0.00017449639562983066,
0.000008210856321966276
] |
{
"id": 8,
"code_window": [
"\t\t}\n",
"\n",
"\t\tret, ttl, err := s.updateState(origState, tryUpdate)\n",
"\t\tif err != nil {\n",
"\t\t\t// If our data is already up to date, return the error\n",
"\t\t\tif !mustCheckData {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 342
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// CronJobs returns a CronJobInformer.
CronJobs() CronJobInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// CronJobs returns a CronJobInformer.
func (v *version) CronJobs() CronJobInformer {
return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
| staging/src/k8s.io/client-go/informers/batch/v1beta1/interface.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0002560432767495513,
0.00018928060308098793,
0.00016570631123613566,
0.00017803908849600703,
0.00003385815944056958
] |
{
"id": 9,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tdata, err := runtime.Encode(s.codec, ret)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded from, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// decodeContinue transforms an encoded continue token into a start key and resource version.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
	// We need to make sure the key ends with "/" so that we only get the children "directories".
	// e.g. if we have keys "/a", "/a/b" and "/ab", getting keys with prefix "/a" will return all three,
	// while prefix "/a/" will return only "/a/b", which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
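				// Historically, a paginated list with a non-zero resourceVersion and no
				// explicit match policy is served at exactly that revision, while "0"
				// falls through and is served at the latest available revision.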
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
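		// "\x00" is the smallest possible byte, so appending it makes the next page
		// start at the first key strictly greater than lastKey.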
key = string(lastKey) + "\x00"
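		// Pin all subsequent pages to the revision served for the first page, so the
		// assembled list is a consistent snapshot at that revision.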
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
		// getResp.Count includes objects that do not match pred.
		// Instead of returning an inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity, decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty, maxCapacity will be used as the size to grow to.
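//
// For example (illustrative): with a current capacity of 16, growSlice(v, 2048, 100)
// grows the capacity to 100, while growSlice(v, 2048, 5000) grows it only to 2048 and
// leaves any further growth to append.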
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
		extra := reflect.MakeSlice(v.Type(), v.Len(), max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
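// Leases may be reused across keys that need similar TTLs (see leaseManager), so the
// lease actually attached can outlive the requested ttl slightly.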
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource version' error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
// decode decodes a byte value into an object, setting the object's resource version to rev.
// On success, objPtr is set to the decoded object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
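// notFound returns an etcd transaction comparison that holds only when the key does
// not exist: a key that was never created, or has been deleted, has a ModRevision of 0.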
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9979524612426758,
0.13186602294445038,
0.0001633269857848063,
0.00027382108964957297,
0.31796351075172424
] |
{
"id": 9,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tdata, err := runtime.Encode(s.codec, ret)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package renewal
import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net"
"os"
"path/filepath"
"testing"
"time"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
certtestutil "k8s.io/kubernetes/cmd/kubeadm/app/util/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pkiutil"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
var (
testCACertCfg = &pkiutil.CertConfig{
Config: certutil.Config{CommonName: "kubernetes"},
}
testCACert, testCAKey, _ = pkiutil.NewCertificateAuthority(testCACertCfg)
testCertCfg = &pkiutil.CertConfig{
Config: certutil.Config{
CommonName: "test-common-name",
Organization: []string{"sig-cluster-lifecycle"},
AltNames: certutil.AltNames{
IPs: []net.IP{net.ParseIP("10.100.0.1")},
DNSNames: []string{"test-domain.space"},
},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
},
}
)
func TestNewManager(t *testing.T) {
tests := []struct {
name string
cfg *kubeadmapi.ClusterConfiguration
expectedCertificates int
}{
{
name: "cluster with local etcd",
cfg: &kubeadmapi.ClusterConfiguration{},
expectedCertificates: 10, //[admin apiserver apiserver-etcd-client apiserver-kubelet-client controller-manager etcd/healthcheck-client etcd/peer etcd/server front-proxy-client scheduler]
},
{
name: "cluster with external etcd",
cfg: &kubeadmapi.ClusterConfiguration{
Etcd: kubeadmapi.Etcd{
External: &kubeadmapi.ExternalEtcd{},
},
},
expectedCertificates: 6, // [admin apiserver apiserver-kubelet-client controller-manager front-proxy-client scheduler]
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
rm, err := NewManager(test.cfg, "")
if err != nil {
t.Fatalf("Failed to create the certificate renewal manager: %v", err)
}
if len(rm.Certificates()) != test.expectedCertificates {
t.Errorf("Expected %d certificates, saw %d", test.expectedCertificates, len(rm.Certificates()))
}
})
}
}
func TestRenewUsingLocalCA(t *testing.T) {
dir := testutil.SetupTempDir(t)
defer os.RemoveAll(dir)
if err := pkiutil.WriteCertAndKey(dir, "ca", testCACert, testCAKey); err != nil {
t.Fatalf("couldn't write out CA certificate to %s", dir)
}
cfg := &kubeadmapi.ClusterConfiguration{
CertificatesDir: dir,
}
rm, err := NewManager(cfg, dir)
if err != nil {
t.Fatalf("Failed to create the certificate renewal manager: %v", err)
}
tests := []struct {
name string
certName string
createCertFunc func() *x509.Certificate
}{
{
name: "Certificate renewal for a PKI certificate",
certName: "apiserver",
createCertFunc: func() *x509.Certificate {
return writeTestCertificate(t, dir, "apiserver", testCACert, testCAKey)
},
},
{
name: "Certificate renewal for a certificate embedded in a kubeconfig file",
certName: "admin.conf",
createCertFunc: func() *x509.Certificate {
return writeTestKubeconfig(t, dir, "admin.conf", testCACert, testCAKey)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cert := test.createCertFunc()
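			// Certificate validity times have one-second granularity, so wait at least a
			// second to guarantee the renewed certificate gets a strictly later NotAfter.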
time.Sleep(1 * time.Second)
_, err := rm.RenewUsingLocalCA(test.certName)
if err != nil {
t.Fatalf("error renewing certificate: %v", err)
}
newCert, err := rm.certificates[test.certName].readwriter.Read()
if err != nil {
t.Fatalf("error reading renewed certificate: %v", err)
}
if newCert.SerialNumber.Cmp(cert.SerialNumber) == 0 {
t.Fatal("expected new certificate, but renewed certificate has same serial number")
}
if !newCert.NotAfter.After(cert.NotAfter) {
				t.Fatalf("expected new certificate with updated expiration, but renewed certificate does not have a later NotAfter value: saw %s, expected greater than %s", newCert.NotAfter, cert.NotAfter)
}
certtestutil.AssertCertificateIsSignedByCa(t, newCert, testCACert)
certtestutil.AssertCertificateHasClientAuthUsage(t, newCert)
certtestutil.AssertCertificateHasOrganizations(t, newCert, testCertCfg.Organization...)
certtestutil.AssertCertificateHasCommonName(t, newCert, testCertCfg.CommonName)
certtestutil.AssertCertificateHasDNSNames(t, newCert, testCertCfg.AltNames.DNSNames...)
certtestutil.AssertCertificateHasIPAddresses(t, newCert, testCertCfg.AltNames.IPs...)
})
}
}
func TestCreateRenewCSR(t *testing.T) {
dir := testutil.SetupTempDir(t)
defer os.RemoveAll(dir)
outdir := filepath.Join(dir, "out")
if err := os.MkdirAll(outdir, 0755); err != nil {
t.Fatalf("couldn't create %s", outdir)
}
if err := pkiutil.WriteCertAndKey(dir, "ca", testCACert, testCAKey); err != nil {
t.Fatalf("couldn't write out CA certificate to %s", dir)
}
cfg := &kubeadmapi.ClusterConfiguration{
CertificatesDir: dir,
}
rm, err := NewManager(cfg, dir)
if err != nil {
t.Fatalf("Failed to create the certificate renewal manager: %v", err)
}
tests := []struct {
name string
certName string
createCertFunc func() *x509.Certificate
}{
{
name: "Creation of a CSR request for renewal of a PKI certificate",
certName: "apiserver",
createCertFunc: func() *x509.Certificate {
return writeTestCertificate(t, dir, "apiserver", testCACert, testCAKey)
},
},
{
name: "Creation of a CSR request for renewal of a certificate embedded in a kubeconfig file",
certName: "admin.conf",
createCertFunc: func() *x509.Certificate {
return writeTestKubeconfig(t, dir, "admin.conf", testCACert, testCAKey)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
test.createCertFunc()
time.Sleep(1 * time.Second)
err := rm.CreateRenewCSR(test.certName, outdir)
if err != nil {
t.Fatalf("error renewing certificate: %v", err)
}
file := fmt.Sprintf("%s.key", test.certName)
if _, err := os.Stat(filepath.Join(outdir, file)); os.IsNotExist(err) {
t.Errorf("Expected file %s does not exist", file)
}
file = fmt.Sprintf("%s.csr", test.certName)
if _, err := os.Stat(filepath.Join(outdir, file)); os.IsNotExist(err) {
t.Errorf("Expected file %s does not exist", file)
}
})
}
}
func TestCertToConfig(t *testing.T) {
expectedConfig := &certutil.Config{
CommonName: "test-common-name",
Organization: []string{"sig-cluster-lifecycle"},
AltNames: certutil.AltNames{
IPs: []net.IP{net.ParseIP("10.100.0.1")},
DNSNames: []string{"test-domain.space"},
},
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
}
cert := &x509.Certificate{
Subject: pkix.Name{
CommonName: "test-common-name",
Organization: []string{"sig-cluster-lifecycle"},
},
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
DNSNames: []string{"test-domain.space"},
IPAddresses: []net.IP{net.ParseIP("10.100.0.1")},
}
cfg := certToConfig(cert)
if cfg.CommonName != expectedConfig.CommonName {
t.Errorf("expected common name %q, got %q", expectedConfig.CommonName, cfg.CommonName)
}
if len(cfg.Organization) != 1 || cfg.Organization[0] != expectedConfig.Organization[0] {
t.Errorf("expected organization %v, got %v", expectedConfig.Organization, cfg.Organization)
}
if len(cfg.Usages) != 1 || cfg.Usages[0] != expectedConfig.Usages[0] {
t.Errorf("expected ext key usage %v, got %v", expectedConfig.Usages, cfg.Usages)
}
if len(cfg.AltNames.IPs) != 1 || cfg.AltNames.IPs[0].String() != expectedConfig.AltNames.IPs[0].String() {
t.Errorf("expected SAN IPs %v, got %v", expectedConfig.AltNames.IPs, cfg.AltNames.IPs)
}
if len(cfg.AltNames.DNSNames) != 1 || cfg.AltNames.DNSNames[0] != expectedConfig.AltNames.DNSNames[0] {
t.Errorf("expected SAN DNSNames %v, got %v", expectedConfig.AltNames.DNSNames, cfg.AltNames.DNSNames)
}
}
| cmd/kubeadm/app/phases/certs/renewal/manager_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0006353679927997291,
0.00020184733148198575,
0.00016256967501249164,
0.00017045751155819744,
0.00009373549983138219
] |
{
"id": 9,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tdata, err := runtime.Encode(s.codec, ret)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 352
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"context"
"encoding/json"
"fmt"
"reflect"
"testing"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
"k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apiextensions-apiserver/test/integration/fixtures"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
genericfeatures "k8s.io/apiserver/pkg/features"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/dynamic"
featuregatetesting "k8s.io/component-base/featuregate/testing"
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/test/integration/framework"
)
// TestApplyCRDNoSchema tests that CRDs and CRs can both be applied to with a PATCH request with the apply content type
// when there is no validation field provided.
func TestApplyCRDNoSchema(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.ClusterScoped)
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Version
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
replicas: 1`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
// Patch object to change the number of replicas
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"spec":{"replicas": 5}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 5)
// Re-apply, we should get conflicts now, since the number of replicas was changed.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when applying object after updating replicas, got: %v", status.Status().Details.Causes)
}
// Re-apply with force, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("force", "true").
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
// Try to set managed fields using a subresource and verify that it has no effect
existingManagedFields, err := getManagedFields(result)
if err != nil {
t.Fatalf("failed to get managedFields from response: %v", err)
}
updateBytes := []byte(`{
"metadata": {
"managedFields": [{
"manager":"testing",
"operation":"Update",
"apiVersion":"v1",
"fieldsType":"FieldsV1",
"fieldsV1":{
"f:spec":{
"f:containers":{
"k:{\"name\":\"testing\"}":{
".":{},
"f:image":{},
"f:name":{}
}
}
}
}
}]
}
}`)
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
SubResource("status").
Name(name).
Param("fieldManager", "subresource_test").
Body(updateBytes).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("Error updating subresource: %v ", err)
}
newManagedFields, err := getManagedFields(result)
if err != nil {
t.Fatalf("failed to get managedFields from response: %v", err)
}
if !reflect.DeepEqual(existingManagedFields, newManagedFields) {
t.Fatalf("Expected managed fields to not have changed when trying manually settting them via subresoures.\n\nExpected: %#v\n\nGot: %#v", existingManagedFields, newManagedFields)
}
// However, it is possible to modify managed fields using the main resource
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "subresource_test").
Body([]byte(`{"metadata":{"managedFields":[{}]}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("Error updating managed fields of the main resource: %v ", err)
}
newManagedFields, err = getManagedFields(result)
if err != nil {
t.Fatalf("failed to get managedFields from response: %v", err)
}
if len(newManagedFields) != 0 {
t.Fatalf("Expected managed fields to have been reset, but got: %v", newManagedFields)
}
}
// TestApplyCRDStructuralSchema tests that when a CRD has a structural schema in its validation field,
// it will be used to construct the CR schema used by apply.
func TestApplyCRDStructuralSchema(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewMultipleVersionNoxuCRD(apiextensionsv1beta1.ClusterScoped)
var c apiextensionsv1beta1.CustomResourceValidation
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"type": "object",
"properties": {
"spec": {
"type": "object",
"x-kubernetes-preserve-unknown-fields": true,
"properties": {
"cronSpec": {
"type": "string",
"pattern": "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$"
},
"ports": {
"type": "array",
"x-kubernetes-list-map-keys": [
"containerPort",
"protocol"
],
"x-kubernetes-list-type": "map",
"items": {
"properties": {
"containerPort": {
"format": "int32",
"type": "integer"
},
"hostIP": {
"type": "string"
},
"hostPort": {
"format": "int32",
"type": "integer"
},
"name": {
"type": "string"
},
"protocol": {
"type": "string"
}
},
"required": [
"containerPort",
"protocol"
],
"type": "object"
}
}
}
}
}
}
}`), &c)
if err != nil {
t.Fatal(err)
}
noxuDefinition.Spec.Validation = &c
falseBool := false
noxuDefinition.Spec.PreserveUnknownFields = &falseBool
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Version
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
finalizers:
- test-finalizer
spec:
cronSpec: "* * * * */5"
replicas: 1
ports:
- name: x
containerPort: 80
protocol: TCP`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 1)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyReplicas(t, result, 1)
verifyNumPorts(t, result, 1)
// Patch object to add another finalizer to the finalizers list
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"metadata":{"finalizers":["test-finalizer","another-one"]}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to add finalizer with merge patch: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 2)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyFinalizersIncludes(t, result, "another-one")
// Re-apply the same config, should work fine, since finalizers should have the list-type extension 'set'.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
SetHeader("Accept", "application/json").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply same config after adding a finalizer: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 2)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyFinalizersIncludes(t, result, "another-one")
// Patch object to change the number of replicas
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"spec":{"replicas": 5}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 5)
// Re-apply, we should get conflicts now, since the number of replicas was changed.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when applying object after updating replicas, got: %v", status.Status().Details.Causes)
}
// Re-apply with force, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("force", "true").
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
// New applier tries to edit an existing list item, we should get conflicts.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test_2").
Body([]byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
ports:
- name: "y"
containerPort: 80
protocol: TCP`, apiVersion, kind, name))).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result)
}
status, ok = err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when a different applier updates existing list item, got: %v", status.Status().Details.Causes)
}
// New applier tries to add a new list item, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test_2").
Body([]byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
ports:
- name: "y"
containerPort: 8080
protocol: TCP`, apiVersion, kind, name))).
SetHeader("Accept", "application/json").
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to add a new list item to the object as a different applier: %v:\n%v", err, string(result))
}
verifyNumPorts(t, result, 2)
// UpdateOnCreate
notExistingYAMLBody := []byte(fmt.Sprintf(`
{
"apiVersion": "%s",
"kind": "%s",
"metadata": {
"name": "%s",
"finalizers": [
"test-finalizer"
]
},
"spec": {
"cronSpec": "* * * * */5",
"replicas": 1,
"ports": [
{
"name": "x",
"containerPort": 80
}
]
},
"protocol": "TCP"
}`, apiVersion, kind, "should-not-exist"))
_, err = rest.Put().
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name("should-not-exist").
Param("fieldManager", "apply_test").
Body(notExistingYAMLBody).
DoRaw(context.TODO())
if !apierrors.IsNotFound(err) {
t.Fatalf("create on update should fail with notFound, got %v", err)
}
}
// TestApplyCRDNonStructuralSchema tests that when a CRD has a non-structural schema in its validation field,
// it will be used to construct the CR schema used by apply, but any non-structural parts of the schema will be treated as
// nested maps (same as a CRD without a schema)
func TestApplyCRDNonStructuralSchema(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
var c apiextensionsv1beta1.CustomResourceValidation
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"type": "object",
"properties": {
"spec": {
"anyOf": [
{
"type": "object",
"properties": {
"cronSpec": {
"type": "string",
"pattern": "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$"
}
}
}, {
"type": "string"
}
]
}
}
}
}`), &c)
if err != nil {
t.Fatal(err)
}
noxuDefinition.Spec.Validation = &c
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Version
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
finalizers:
- test-finalizer
spec:
cronSpec: "* * * * */5"
replicas: 1`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 1)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyReplicas(t, result, 1.0)
// Patch object to add another finalizer to the finalizers list
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"metadata":{"finalizers":["test-finalizer","another-one"]}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to add finalizer with merge patch: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 2)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyFinalizersIncludes(t, result, "another-one")
// Re-apply the same config, should work fine, since finalizers should have the list-type extension 'set'.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
SetHeader("Accept", "application/json").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply same config after adding a finalizer: %v:\n%v", err, string(result))
}
verifyNumFinalizers(t, result, 2)
verifyFinalizersIncludes(t, result, "test-finalizer")
verifyFinalizersIncludes(t, result, "another-one")
// Patch object to change the number of replicas
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"spec":{"replicas": 5}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 5.0)
// Re-apply, we should get conflicts now, since the number of replicas was changed.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when applying object after updating replicas, got: %v", status.Status().Details.Causes)
}
// Re-apply with force, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("force", "true").
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1.0)
}
// verifyNumFinalizers checks that len(.metadata.finalizers) == n
func verifyNumFinalizers(t *testing.T, b []byte, n int) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to unmarshal response: %v", err)
}
if actual, expected := len(obj.GetFinalizers()), n; actual != expected {
t.Fatalf("expected %v finalizers but got %v:\n%v", expected, actual, string(b))
}
}
// verifyFinalizersIncludes checks that .metadata.finalizers includes e
func verifyFinalizersIncludes(t *testing.T, b []byte, e string) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to unmarshal response: %v", err)
}
for _, a := range obj.GetFinalizers() {
if a == e {
return
}
}
t.Fatalf("expected finalizers to include %q but got: %v", e, obj.GetFinalizers())
}
// verifyReplicas checks that .spec.replicas == r
func verifyReplicas(t *testing.T, b []byte, r int) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to find replicas number in response: %v:\n%v", err, string(b))
}
spec, ok := obj.Object["spec"]
if !ok {
t.Fatalf("failed to find replicas number in response:\n%v", string(b))
}
specMap, ok := spec.(map[string]interface{})
if !ok {
t.Fatalf("failed to find replicas number in response:\n%v", string(b))
}
replicas, ok := specMap["replicas"]
if !ok {
t.Fatalf("failed to find replicas number in response:\n%v", string(b))
}
replicasNumber, ok := replicas.(int64)
if !ok {
t.Fatalf("failed to find replicas number in response: expected int64 but got: %v", reflect.TypeOf(replicas))
}
if actual, expected := replicasNumber, int64(r); actual != expected {
		t.Fatalf("expected %v replicas but got %v:\n%v", expected, actual, string(b))
}
}
// verifyNumPorts checks that len(.spec.ports) == n
func verifyNumPorts(t *testing.T, b []byte, n int) {
obj := unstructured.Unstructured{}
err := obj.UnmarshalJSON(b)
if err != nil {
t.Fatalf("failed to find ports list in response: %v:\n%v", err, string(b))
}
spec, ok := obj.Object["spec"]
if !ok {
t.Fatalf("failed to find ports list in response:\n%v", string(b))
}
specMap, ok := spec.(map[string]interface{})
if !ok {
t.Fatalf("failed to find ports list in response:\n%v", string(b))
}
ports, ok := specMap["ports"]
if !ok {
t.Fatalf("failed to find ports list in response:\n%v", string(b))
}
portsList, ok := ports.([]interface{})
if !ok {
t.Fatalf("failed to find ports list in response: expected array but got: %v", reflect.TypeOf(ports))
}
if actual, expected := len(portsList), n; actual != expected {
t.Fatalf("expected %v ports but got %v:\n%v", expected, actual, string(b))
}
}
// TestApplyCRDUnhandledSchema tests that when a CRD has a schema that kube-openapi ToProtoModels cannot handle correctly,
// apply falls back to non-schema behavior
func TestApplyCRDUnhandledSchema(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewNoxuCustomResourceDefinition(apiextensionsv1beta1.ClusterScoped)
// This is a schema that kube-openapi ToProtoModels does not handle correctly.
// https://github.com/kubernetes/kubernetes/blob/38752f7f99869ed65fb44378360a517649dc2f83/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go#L184
var c apiextensionsv1beta1.CustomResourceValidation
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"properties": {
"TypeFooBar": {
"type": "array"
}
}
}
}`), &c)
if err != nil {
t.Fatal(err)
}
noxuDefinition.Spec.Validation = &c
noxuDefinition, err = fixtures.CreateNewCustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Version
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
replicas: 1`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
// Patch object to change the number of replicas
result, err = rest.Patch(types.MergePatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Body([]byte(`{"spec":{"replicas": 5}}`)).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to update number of replicas with merge patch: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 5)
// Re-apply, we should get conflicts now, since the number of replicas was changed.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when applying object after updating replicas, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when applying object after updating replicas, got: %v", status.Status().Details.Causes)
}
// Re-apply with force, should work fine.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Version, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("force", "true").
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to apply object with force after updating replicas: %v:\n%v", err, string(result))
}
verifyReplicas(t, result, 1)
}
func getManagedFields(rawResponse []byte) ([]metav1.ManagedFieldsEntry, error) {
obj := unstructured.Unstructured{}
if err := obj.UnmarshalJSON(rawResponse); err != nil {
return nil, err
}
return obj.GetManagedFields(), nil
}
func TestDefaultMissingKeyCRD(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
if err != nil {
t.Fatal(err)
}
defer server.TearDownFn()
config := server.ClientConfig
apiExtensionClient, err := clientset.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
t.Fatal(err)
}
noxuDefinition := fixtures.NewNoxuV1CustomResourceDefinition(apiextensionsv1.ClusterScoped)
err = json.Unmarshal([]byte(`{
"openAPIV3Schema": {
"type": "object",
"properties": {
"spec": {
"type": "object",
"x-kubernetes-preserve-unknown-fields": true,
"properties": {
"cronSpec": {
"type": "string",
"pattern": "^(\\d+|\\*)(/\\d+)?(\\s+(\\d+|\\*)(/\\d+)?){4}$"
},
"ports": {
"type": "array",
"x-kubernetes-list-map-keys": [
"containerPort",
"protocol"
],
"x-kubernetes-list-type": "map",
"items": {
"properties": {
"containerPort": {
"format": "int32",
"type": "integer"
},
"hostIP": {
"type": "string"
},
"hostPort": {
"format": "int32",
"type": "integer"
},
"name": {
"type": "string"
},
"protocol": {
"default": "TCP",
"type": "string"
}
},
"required": [
"containerPort"
],
"type": "object"
}
}
}
}
}
}
}`), &noxuDefinition.Spec.Versions[0].Schema)
if err != nil {
t.Fatal(err)
}
noxuDefinition, err = fixtures.CreateNewV1CustomResourceDefinition(noxuDefinition, apiExtensionClient, dynamicClient)
if err != nil {
t.Fatal(err)
}
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
rest := apiExtensionClient.Discovery().RESTClient()
yamlBody := []byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
finalizers:
- test-finalizer
spec:
cronSpec: "* * * * */5"
replicas: 1
ports:
- name: x
containerPort: 80`, apiVersion, kind, name))
result, err := rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test").
Body(yamlBody).
DoRaw(context.TODO())
if err != nil {
t.Fatalf("failed to create custom resource with apply: %v:\n%v", err, string(result))
}
// New applier tries to edit an existing list item, we should get conflicts.
result, err = rest.Patch(types.ApplyPatchType).
AbsPath("/apis", noxuDefinition.Spec.Group, noxuDefinition.Spec.Versions[0].Name, noxuDefinition.Spec.Names.Plural).
Name(name).
Param("fieldManager", "apply_test_2").
Body([]byte(fmt.Sprintf(`
apiVersion: %s
kind: %s
metadata:
name: %s
spec:
ports:
- name: "y"
containerPort: 80
protocol: TCP`, apiVersion, kind, name))).
DoRaw(context.TODO())
if err == nil {
t.Fatalf("Expecting to get conflicts when a different applier updates existing list item, got no error: %s", result)
}
status, ok := err.(*apierrors.StatusError)
if !ok {
t.Fatalf("Expecting to get conflicts as API error")
}
if len(status.Status().Details.Causes) != 1 {
t.Fatalf("Expecting to get one conflict when a different applier updates existing list item, got: %v", status.Status().Details.Causes)
}
}
| test/integration/apiserver/apply/apply_crd_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0018631969578564167,
0.0002985649334732443,
0.00015828220057301223,
0.00017196821863763034,
0.0003077724832110107
] |
{
"id": 9,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\tmustCheckData = false\n",
"\t\t\t// Retry\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\n",
"\t\tdata, err := runtime.Encode(s.codec, ret)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 352
} | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/metrics.proto
package types
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
types "github.com/gogo/protobuf/types"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
time "time"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type Metric struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Data *types.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) {
return fileDescriptor_8d594d87edf6e6bc, []int{0}
}
func (m *Metric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *Metric) XXX_Merge(src proto.Message) {
xxx_messageInfo_Metric.Merge(m, src)
}
func (m *Metric) XXX_Size() int {
return m.Size()
}
func (m *Metric) XXX_DiscardUnknown() {
xxx_messageInfo_Metric.DiscardUnknown(m)
}
var xxx_messageInfo_Metric proto.InternalMessageInfo
func init() {
proto.RegisterType((*Metric)(nil), "containerd.types.Metric")
}
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptor_8d594d87edf6e6bc)
}
var fileDescriptor_8d594d87edf6e6bc = []byte{
// 258 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24,
0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01,
0x00, 0x00,
}
func (m *Metric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.XXX_unrecognized != nil {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
if m.Data != nil {
{
size, err := m.Data.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintMetrics(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x1a
}
if len(m.ID) > 0 {
i -= len(m.ID)
copy(dAtA[i:], m.ID)
i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
i--
dAtA[i] = 0x12
}
n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
if err2 != nil {
return 0, err2
}
i -= n2
i = encodeVarintMetrics(dAtA, i, uint64(n2))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
offset -= sovMetrics(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Metric) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
n += 1 + l + sovMetrics(uint64(l))
l = len(m.ID)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
if m.Data != nil {
l = m.Data.Size()
n += 1 + l + sovMetrics(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovMetrics(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozMetrics(x uint64) (n int) {
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Metric) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Metric{`,
`Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
return s
}
func valueToStringMetrics(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Metric) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Metric: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthMetrics
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Data == nil {
m.Data = &types.Any{}
}
if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMetrics(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthMetrics
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupMetrics
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthMetrics
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group")
)
| vendor/github.com/containerd/containerd/api/types/metrics.pb.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.8904421329498291,
0.019569817930459976,
0.00016232400957960635,
0.0001716883125482127,
0.129822239279747
] |
{
"id": 10,
"code_window": [
"\t\tif !origState.stale && bytes.Equal(data, origState.data) {\n",
"\t\t\t// if we skipped the original Get in this loop, we must refresh from\n",
"\t\t\t// etcd in order to be sure the data in the store is equivalent to\n",
"\t\t\t// our desired serialization\n",
"\t\t\tif mustCheckData {\n",
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif !origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 365
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
// Versioner abstracts setting and retrieving metadata fields from database response
// onto the object or list. It is required to maintain storage invariants - updating an
// object twice with the same data except for the ResourceVersion and SelfLink must be
// a no-op. A resourceVersion of type uint64 is a 'raw' resourceVersion,
// intended to be sent directly to or from the backend. A resourceVersion of
// type string is a 'safe' resourceVersion, intended for consumption by users.
type Versioner interface {
// UpdateObject sets storage metadata into an API object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata
// from database.
UpdateObject(obj runtime.Object, resourceVersion uint64) error
// UpdateList sets the resource version into an API list object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata from
// database. continueValue is optional and indicates that more results are available if the client
// passes that value to the server in a subsequent call. remainingItemCount indicates the number
// of remaining objects if the list is partial. The remainingItemCount field is omitted during
// serialization if it is set to nil.
UpdateList(obj runtime.Object, resourceVersion uint64, continueValue string, remainingItemCount *int64) error
// PrepareObjectForStorage should set SelfLink and ResourceVersion to the empty value. Should
// return an error if the specified object cannot be updated.
PrepareObjectForStorage(obj runtime.Object) error
// ObjectResourceVersion returns the resource version (for persistence) of the specified object.
// Should return an error if the specified object does not have a persistable version.
ObjectResourceVersion(obj runtime.Object) (uint64, error)
// ParseResourceVersion takes a resource version argument and
	// converts it to a form understood by the storage backend. For watch, the parsed value should be passed to helper.Watch().
// Because resourceVersion is an opaque value, the default watch
// behavior for non-zero watch is to watch the next value (if you pass
// "1", you will see updates from "2" onwards).
ParseResourceVersion(resourceVersion string) (uint64, error)
}
// ResponseMeta contains information about the database metadata that is associated with
// an object. It abstracts the actual underlying objects to prevent coupling with a concrete
// database and to improve testability.
type ResponseMeta struct {
// TTL is the time to live of the node that contained the returned object. It may be
// zero or negative in some cases (objects may be expired after the requested
// expiration time due to server lag).
TTL int64
// The resource version of the node that contained the returned object.
ResourceVersion uint64
}
// IndexerFunc is a function that for a given object computes
// <value of an index> for a particular <index>.
type IndexerFunc func(obj runtime.Object) string
// IndexerFuncs is a mapping from <index name> to function that
// for a given object computes <value for that index>.
type IndexerFuncs map[string]IndexerFunc
// Everything accepts all objects.
var Everything = SelectionPredicate{
Label: labels.Everything(),
Field: fields.Everything(),
}
// MatchValue defines a pair (<index name>, <value for that index>).
type MatchValue struct {
IndexName string
Value string
}
// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
// that is guaranteed to succeed.
// See the comment for GuaranteedUpdate for more details.
type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
// ValidateObjectFunc is a function to act on a given object. An error may be returned
// if the hook cannot be completed. The function may NOT transform the provided
// object.
type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error
// ValidateAllObjectFunc is an "admit everything" instance of ValidateObjectFunc.
func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error {
return nil
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty"`
// Specifies the target ResourceVersion
// +optional
ResourceVersion *string `json:"resourceVersion,omitempty"`
}
// NewUIDPreconditions returns a Preconditions with UID set.
func NewUIDPreconditions(uid string) *Preconditions {
u := types.UID(uid)
return &Preconditions{UID: &u}
}
func (p *Preconditions) Check(key string, obj runtime.Object) error {
if p == nil {
return nil
}
objMeta, err := meta.Accessor(obj)
if err != nil {
return NewInternalErrorf(
"can't enforce preconditions %v on un-introspectable object %v, got error: %v",
*p,
obj,
err)
}
if p.UID != nil && *p.UID != objMeta.GetUID() {
err := fmt.Sprintf(
"Precondition failed: UID in precondition: %v, UID in object meta: %v",
*p.UID,
objMeta.GetUID())
return NewInvalidObjError(key, err)
}
if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() {
err := fmt.Sprintf(
"Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v",
*p.ResourceVersion,
objMeta.GetResourceVersion())
return NewInvalidObjError(key, err)
}
return nil
}
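// Illustrative sketch added for clarity (not part of the original file): a
// typical caller builds Preconditions from a UID it already holds and lets
// Check reject the operation when the stored object no longer matches.
// The key and UID values below are made-up.
func examplePreconditionsCheck(obj runtime.Object) error {
	pre := NewUIDPreconditions("3c7d4a1e-9f2b-4c7e-8a51-000000000000")
	return pre.Check("/registry/pods/ns1/example-pod", obj)
}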
// Interface offers a common interface for object marshaling/unmarshaling operations and
// hides all the storage-related operations behind it.
type Interface interface {
// Returns Versioner associated with this interface.
Versioner() Versioner
// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
// in seconds (0 means forever). If no error is returned and out is not nil, out will be
// set to the read value from database.
Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error
// Delete removes the specified key and returns the value that existed at that spot.
// If key didn't exist, it will return NotFound storage error.
// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the
	// current version of the object to avoid a read operation from storage to get it.
	// However, the implementations have to retry in case the suggestion is stale.
Delete(
ctx context.Context, key string, out runtime.Object, preconditions *Preconditions,
validateDeletion ValidateObjectFunc, cachedExistingObject runtime.Object) error
// Watch begins watching the specified key. Events are decoded into API objects,
// and any items selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will get current object at given key
// and send it in an "ADDED" event, before watch starts.
Watch(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// WatchList begins watching the specified key's items. Items are decoded into API
// objects and any item selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will list current objects directory defined by key
// and send them in "ADDED" events, before watch starts.
WatchList(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// Get unmarshals json found at key into objPtr. On a not found error, will either
// return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'.
// Treats empty responses and nil response nodes exactly like a not found error.
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
Get(ctx context.Context, key string, opts GetOptions, objPtr runtime.Object) error
	// GetToList unmarshals the JSON found at key into a *List API object
// (an object that satisfies the runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
GetToList(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
	// List unmarshals the JSON objects found at the directory defined by key
// into *List api object (an object that satisfies runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
List(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType')
// retrying the update until success if there is index conflict.
// Note that object passed to tryUpdate may change across invocations of tryUpdate() if
// other writers are simultaneously updating it, so tryUpdate() needs to take into account
// the current contents of the object when deciding how the update object should look.
// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false
// or zero value in 'ptrToType' parameter otherwise.
// If the object to update has the same value as previous, it won't do any update
// but will return the object in 'ptrToType' parameter.
// If 'suggestion' is non-nil, it can be used as a suggestion about the current version
	// of the object to avoid a read operation from storage to get it. However, the
	// implementations have to retry in case the suggestion is stale.
//
// Example:
//
// s := /* implementation of Interface */
// err := s.GuaranteedUpdate(
// "myKey", &MyType{}, true,
// func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
// // Before each invocation of the user defined function, "input" is reset to
// // current contents for "myKey" in database.
// curr := input.(*MyType) // Guaranteed to succeed.
//
// // Make the modification
// curr.Counter++
//
// // Return the modified object - return an error to stop iterating. Return
// // a uint64 to alter the TTL on the object, or nil to keep it the same value.
	// return curr, nil, nil
// },
// )
GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
		preconditions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error
// Count returns number of different entries under the key (generally being path prefix).
Count(key string) (int64, error)
}
// GetOptions provides the options that may be provided for storage get operations.
type GetOptions struct {
// IgnoreNotFound determines what is returned if the requested object is not found. If
// true, a zero object is returned. If false, an error is returned.
IgnoreNotFound bool
// ResourceVersion provides a resource version constraint to apply to the get operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
}
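// Illustrative sketch added for clarity (not part of the original file): how a
// caller might honor the documented "not older than" semantics of GetOptions.
// The key, the resource version, and the assumption that 's' is some concrete
// implementation of Interface are hypothetical.
func exampleGet(ctx context.Context, s Interface, out runtime.Object) error {
	// ResourceVersion "42" requests data at least as new as revision 42;
	// IgnoreNotFound=false turns a missing key into a NotFound error.
	return s.Get(ctx, "/registry/pods/ns1/example-pod", GetOptions{
		IgnoreNotFound:  false,
		ResourceVersion: "42",
	}, out)
}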
// ListOptions provides the options that may be provided for storage list operations.
type ListOptions struct {
// ResourceVersion provides a resource version constraint to apply to the list operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
// ResourceVersionMatch provides the rule for how the resource version constraint applies. If set
// to the default value "" the legacy resource version semantic apply.
ResourceVersionMatch metav1.ResourceVersionMatch
// Predicate provides the selection rules for the list operation.
Predicate SelectionPredicate
// ProgressNotify determines whether storage-originated bookmark (progress notify) events should
// be delivered to the users. The option is ignored for non-watch requests.
ProgressNotify bool
}
| staging/src/k8s.io/apiserver/pkg/storage/interfaces.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00164900254458189,
0.00029402237851172686,
0.00016242310812231153,
0.00016857542505022138,
0.0003471016825642437
] |
{
"id": 10,
"code_window": [
"\t\tif !origState.stale && bytes.Equal(data, origState.data) {\n",
"\t\t\t// if we skipped the original Get in this loop, we must refresh from\n",
"\t\t\t// etcd in order to be sure the data in the store is equivalent to\n",
"\t\t\t// our desired serialization\n",
"\t\t\tif mustCheckData {\n",
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif !origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 365
} | package humanize
import "strconv"
// Ordinal gives you the input number in a rank/ordinal format.
//
// Ordinal(3) -> 3rd
func Ordinal(x int) string {
suffix := "th"
switch x % 10 {
case 1:
if x%100 != 11 {
suffix = "st"
}
case 2:
if x%100 != 12 {
suffix = "nd"
}
case 3:
if x%100 != 13 {
suffix = "rd"
}
}
return strconv.Itoa(x) + suffix
}
| vendor/github.com/dustin/go-humanize/ordinals.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.000174105676705949,
0.0001726657064864412,
0.0001716784026939422,
0.00017221305461134762,
0.000001041341533891682
] |
{
"id": 10,
"code_window": [
"\t\tif !origState.stale && bytes.Equal(data, origState.data) {\n",
"\t\t\t// if we skipped the original Get in this loop, we must refresh from\n",
"\t\t\t// etcd in order to be sure the data in the store is equivalent to\n",
"\t\t\t// our desired serialization\n",
"\t\t\tif mustCheckData {\n",
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif !origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 365
} | apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: php-apache
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: php-apache
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
targetAverageUtilization: 50
| test/fixtures/pkg/kubectl/cmd/convert/v2beta1HPA.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017611696966923773,
0.0001754272379912436,
0.00017473750631324947,
0.0001754272379912436,
6.89731677994132e-7
] |
{
"id": 10,
"code_window": [
"\t\tif !origState.stale && bytes.Equal(data, origState.data) {\n",
"\t\t\t// if we skipped the original Get in this loop, we must refresh from\n",
"\t\t\t// etcd in order to be sure the data in the store is equivalent to\n",
"\t\t\t// our desired serialization\n",
"\t\t\tif mustCheckData {\n",
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif !origStateIsCurrent {\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 365
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
// ClusterRoleListerExpansion allows custom methods to be added to
// ClusterRoleLister.
type ClusterRoleListerExpansion interface{}
// ClusterRoleBindingListerExpansion allows custom methods to be added to
// ClusterRoleBindingLister.
type ClusterRoleBindingListerExpansion interface{}
// RoleListerExpansion allows custom methods to be added to
// RoleLister.
type RoleListerExpansion interface{}
// RoleNamespaceListerExpansion allows custom methods to be added to
// RoleNamespaceLister.
type RoleNamespaceListerExpansion interface{}
// RoleBindingListerExpansion allows custom methods to be added to
// RoleBindingLister.
type RoleBindingListerExpansion interface{}
// RoleBindingNamespaceListerExpansion allows custom methods to be added to
// RoleBindingNamespaceLister.
type RoleBindingNamespaceListerExpansion interface{}
| staging/src/k8s.io/client-go/listers/rbac/v1/expansion_generated.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0003500515886116773,
0.00021654779266100377,
0.00016771172522567213,
0.00017952488269656897,
0.00006793135980842635
] |
{
"id": 11,
"code_window": [
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tmustCheckData = false\n",
"\t\t\t\tif !bytes.Equal(data, origState.data) {\n",
"\t\t\t\t\t// original data changed, restart loop\n",
"\t\t\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 370
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
	// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded form, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// decodeContinue transforms an encoded continue token into a versioned struct.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
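// Illustrative sketch added for clarity (not part of the original file): a
// continue-token round trip under an assumed "/registry/pods/" key prefix.
// encodeContinue stores the key relative to the prefix inside a base64url
// JSON token; decodeContinue validates the version and re-qualifies the key.
func exampleContinueRoundTrip() error {
	token, err := encodeContinue("/registry/pods/ns1/example-pod\x00", "/registry/pods/", 1234)
	if err != nil {
		return err
	}
	key, rv, err := decodeContinue(token, "/registry/pods/")
	if err != nil {
		return err
	}
	_ = key // "/registry/pods/ns1/example-pod\x00"
	_ = rv  // 1234
	return nil
}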
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
	// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
		// getResp.Count also includes objects that do not match the pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty maxCapacity will be used as the size to grow.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
// decode decodes value of bytes into object. It will also set the object resource version to rev.
// On success, objPtr would be set to the object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
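	// An error from the predicate is treated as a non-match: the item is
	// skipped rather than failing the whole list operation.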
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
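
// notFound returns an etcd transaction comparison that succeeds only if the
// key does not exist, i.e. its ModRevision is 0.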
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}

// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9972133040428162,
0.0740298479795456,
0.00015902650193311274,
0.00017470834427513182,
0.2543081045150757
] |
{
"id": 11,
"code_window": [
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tmustCheckData = false\n",
"\t\t\t\tif !bytes.Equal(data, origState.data) {\n",
"\t\t\t\t\t// original data changed, restart loop\n",
"\t\t\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 370
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["cow.go"],
importmap = "k8s.io/kubernetes/vendor/github.com/Microsoft/hcsshim/internal/cow",
importpath = "github.com/Microsoft/hcsshim/internal/cow",
visibility = ["//vendor/github.com/Microsoft/hcsshim:__subpackages__"],
deps = [
"//vendor/github.com/Microsoft/hcsshim/internal/schema1:go_default_library",
"//vendor/github.com/Microsoft/hcsshim/internal/schema2:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/Microsoft/hcsshim/internal/cow/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017766124801710248,
0.00017584273882675916,
0.00017290955292992294,
0.00017695744463708252,
0.00000209388986149861
] |
{
"id": 11,
"code_window": [
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tmustCheckData = false\n",
"\t\t\t\tif !bytes.Equal(data, origState.data) {\n",
"\t\t\t\t\t// original data changed, restart loop\n",
"\t\t\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 370
} | // This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
// license. Its contents can be found at:
// http://creativecommons.org/publicdomain/zero/1.0/
package bindata
import (
"io"
)
const lowerHex = "0123456789abcdef"
type StringWriter struct {
io.Writer
c int
}
func (w *StringWriter) Write(p []byte) (n int, err error) {
if len(p) == 0 {
return
}
buf := []byte(`\x00`)
var b byte
for n, b = range p {
buf[2] = lowerHex[b/16]
buf[3] = lowerHex[b%16]
w.Writer.Write(buf)
w.c++
}
n++
return
}
| vendor/github.com/go-bindata/go-bindata/stringwriter.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017854926409199834,
0.00017557253886479884,
0.00016725962632335722,
0.00017824064707383513,
0.0000048014494495873805
] |
{
"id": 11,
"code_window": [
"\t\t\t\torigState, err = getCurrentState()\n",
"\t\t\t\tif err != nil {\n",
"\t\t\t\t\treturn err\n",
"\t\t\t\t}\n",
"\t\t\t\tmustCheckData = false\n",
"\t\t\t\tif !bytes.Equal(data, origState.data) {\n",
"\t\t\t\t\t// original data changed, restart loop\n",
"\t\t\t\t\tcontinue\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 370
} | package(default_visibility = ["//visibility:public"])
load(
"//build:go.bzl",
go_binary = "go_binary_conditional_pure",
)
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_binary(
name = "gendocs",
embed = [":go_default_library"],
)
go_library(
name = "go_default_library",
srcs = ["gen_kubectl_docs.go"],
importpath = "k8s.io/kubernetes/cmd/gendocs",
deps = [
"//cmd/genutils:go_default_library",
"//staging/src/k8s.io/kubectl/pkg/cmd:go_default_library",
"//vendor/github.com/spf13/cobra/doc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| cmd/gendocs/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017673653201200068,
0.00017548594041727483,
0.00017306393419858068,
0.00017607168410904706,
0.0000014827693348706816
] |
{
"id": 12,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\ttrace.Step(\"Retry value restored\")\n",
"\t\t\tmustCheckData = false\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tputResp := txnResp.Responses[0].GetResponsePut()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 414
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
)
// Versioner abstracts setting and retrieving metadata fields from database response
// onto the object ot list. It is required to maintain storage invariants - updating an
// object twice with the same data except for the ResourceVersion and SelfLink must be
// a no-op. A resourceVersion of type uint64 is a 'raw' resourceVersion,
// intended to be sent directly to or from the backend. A resourceVersion of
// type string is a 'safe' resourceVersion, intended for consumption by users.
type Versioner interface {
// UpdateObject sets storage metadata into an API object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata
// from database.
UpdateObject(obj runtime.Object, resourceVersion uint64) error
// UpdateList sets the resource version into an API list object. Returns an error if the object
// cannot be updated correctly. May return nil if the requested object does not need metadata from
// database. continueValue is optional and indicates that more results are available if the client
// passes that value to the server in a subsequent call. remainingItemCount indicates the number
// of remaining objects if the list is partial. The remainingItemCount field is omitted during
// serialization if it is set to nil.
UpdateList(obj runtime.Object, resourceVersion uint64, continueValue string, remainingItemCount *int64) error
// PrepareObjectForStorage should set SelfLink and ResourceVersion to the empty value. Should
// return an error if the specified object cannot be updated.
PrepareObjectForStorage(obj runtime.Object) error
// ObjectResourceVersion returns the resource version (for persistence) of the specified object.
// Should return an error if the specified object does not have a persistable version.
ObjectResourceVersion(obj runtime.Object) (uint64, error)
// ParseResourceVersion takes a resource version argument and
// converts it to the storage backend. For watch we should pass to helper.Watch().
// Because resourceVersion is an opaque value, the default watch
// behavior for non-zero watch is to watch the next value (if you pass
// "1", you will see updates from "2" onwards).
ParseResourceVersion(resourceVersion string) (uint64, error)
}
// ResponseMeta contains information about the database metadata that is associated with
// an object. It abstracts the actual underlying objects to prevent coupling with concrete
// database and to improve testability.
type ResponseMeta struct {
// TTL is the time to live of the node that contained the returned object. It may be
// zero or negative in some cases (objects may be expired after the requested
// expiration time due to server lag).
TTL int64
// The resource version of the node that contained the returned object.
ResourceVersion uint64
}
// IndexerFunc is a function that for a given object computes
// <value of an index> for a particular <index>.
type IndexerFunc func(obj runtime.Object) string
// IndexerFuncs is a mapping from <index name> to function that
// for a given object computes <value for that index>.
type IndexerFuncs map[string]IndexerFunc
// Everything accepts all objects.
var Everything = SelectionPredicate{
Label: labels.Everything(),
Field: fields.Everything(),
}
// MatchValue defines a pair (<index name>, <value for that index>).
type MatchValue struct {
IndexName string
Value string
}
// Pass an UpdateFunc to Interface.GuaranteedUpdate to make an update
// that is guaranteed to succeed.
// See the comment for GuaranteedUpdate for more details.
type UpdateFunc func(input runtime.Object, res ResponseMeta) (output runtime.Object, ttl *uint64, err error)
// ValidateObjectFunc is a function to act on a given object. An error may be returned
// if the hook cannot be completed. The function may NOT transform the provided
// object.
type ValidateObjectFunc func(ctx context.Context, obj runtime.Object) error
// ValidateAllObjectFunc is a "admit everything" instance of ValidateObjectFunc.
func ValidateAllObjectFunc(ctx context.Context, obj runtime.Object) error {
return nil
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty"`
// Specifies the target ResourceVersion
// +optional
ResourceVersion *string `json:"resourceVersion,omitempty"`
}
// NewUIDPreconditions returns a Preconditions with UID set.
func NewUIDPreconditions(uid string) *Preconditions {
u := types.UID(uid)
return &Preconditions{UID: &u}
}
func (p *Preconditions) Check(key string, obj runtime.Object) error {
if p == nil {
return nil
}
objMeta, err := meta.Accessor(obj)
if err != nil {
return NewInternalErrorf(
"can't enforce preconditions %v on un-introspectable object %v, got error: %v",
*p,
obj,
err)
}
if p.UID != nil && *p.UID != objMeta.GetUID() {
err := fmt.Sprintf(
"Precondition failed: UID in precondition: %v, UID in object meta: %v",
*p.UID,
objMeta.GetUID())
return NewInvalidObjError(key, err)
}
if p.ResourceVersion != nil && *p.ResourceVersion != objMeta.GetResourceVersion() {
err := fmt.Sprintf(
"Precondition failed: ResourceVersion in precondition: %v, ResourceVersion in object meta: %v",
*p.ResourceVersion,
objMeta.GetResourceVersion())
return NewInvalidObjError(key, err)
}
return nil
}
// Interface offers a common interface for object marshaling/unmarshaling operations and
// hides all the storage-related operations behind it.
type Interface interface {
// Returns Versioner associated with this interface.
Versioner() Versioner
// Create adds a new object at a key unless it already exists. 'ttl' is time-to-live
// in seconds (0 means forever). If no error is returned and out is not nil, out will be
// set to the read value from database.
Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error
// Delete removes the specified key and returns the value that existed at that spot.
// If key didn't exist, it will return NotFound storage error.
// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the
// current version of the object to avoid read operation from storage to get it.
// However, the implementations have to retry in case suggestion is stale.
Delete(
ctx context.Context, key string, out runtime.Object, preconditions *Preconditions,
validateDeletion ValidateObjectFunc, cachedExistingObject runtime.Object) error
// Watch begins watching the specified key. Events are decoded into API objects,
// and any items selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will get current object at given key
// and send it in an "ADDED" event, before watch starts.
Watch(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// WatchList begins watching the specified key's items. Items are decoded into API
// objects and any item selected by 'p' are sent down to returned watch.Interface.
// resourceVersion may be used to specify what version to begin watching,
// which should be the current resourceVersion, and no longer rv+1
// (e.g. reconnecting without missing any updates).
// If resource version is "0", this interface will list current objects directory defined by key
// and send them in "ADDED" events, before watch starts.
WatchList(ctx context.Context, key string, opts ListOptions) (watch.Interface, error)
// Get unmarshals json found at key into objPtr. On a not found error, will either
// return a zero object of the requested type, or an error, depending on 'opts.ignoreNotFound'.
// Treats empty responses and nil response nodes exactly like a not found error.
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
Get(ctx context.Context, key string, opts GetOptions, objPtr runtime.Object) error
// GetToList unmarshals json found at key and opaque it into *List api object
// (an object that satisfies the runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
GetToList(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
// List unmarshalls jsons found at directory defined by key and opaque them
// into *List api object (an object that satisfies runtime.IsList definition).
// The returned contents may be delayed, but it is guaranteed that they will
// match 'opts.ResourceVersion' according 'opts.ResourceVersionMatch'.
List(ctx context.Context, key string, opts ListOptions, listObj runtime.Object) error
// GuaranteedUpdate keeps calling 'tryUpdate()' to update key 'key' (of type 'ptrToType')
// retrying the update until success if there is index conflict.
// Note that object passed to tryUpdate may change across invocations of tryUpdate() if
// other writers are simultaneously updating it, so tryUpdate() needs to take into account
// the current contents of the object when deciding how the update object should look.
// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false
// or zero value in 'ptrToType' parameter otherwise.
// If the object to update has the same value as previous, it won't do any update
// but will return the object in 'ptrToType' parameter.
// If 'suggestion' is non-nil, it can be used as a suggestion about the current version
// of the object to avoid read operation from storage to get it. However, the
// implementations have to retry in case suggestion is stale.
//
// Example:
//
// s := /* implementation of Interface */
// err := s.GuaranteedUpdate(
// "myKey", &MyType{}, true,
// func(input runtime.Object, res ResponseMeta) (runtime.Object, *uint64, error) {
// // Before each invocation of the user defined function, "input" is reset to
// // current contents for "myKey" in database.
// curr := input.(*MyType) // Guaranteed to succeed.
//
// // Make the modification
// curr.Counter++
//
// // Return the modified object - return an error to stop iterating. Return
// // a uint64 to alter the TTL on the object, or nil to keep it the same value.
// return cur, nil, nil
// },
// )
GuaranteedUpdate(
ctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,
precondtions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error
// Count returns number of different entries under the key (generally being path prefix).
Count(key string) (int64, error)
}
// GetOptions provides the options that may be provided for storage get operations.
type GetOptions struct {
// IgnoreNotFound determines what is returned if the requested object is not found. If
// true, a zero object is returned. If false, an error is returned.
IgnoreNotFound bool
// ResourceVersion provides a resource version constraint to apply to the get operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
}
// ListOptions provides the options that may be provided for storage list operations.
type ListOptions struct {
// ResourceVersion provides a resource version constraint to apply to the list operation
// as a "not older than" constraint: the result contains data at least as new as the provided
// ResourceVersion. The newest available data is preferred, but any data not older than this
// ResourceVersion may be served.
ResourceVersion string
// ResourceVersionMatch provides the rule for how the resource version constraint applies. If set
// to the default value "" the legacy resource version semantic apply.
ResourceVersionMatch metav1.ResourceVersionMatch
// Predicate provides the selection rules for the list operation.
Predicate SelectionPredicate
// ProgressNotify determines whether storage-originated bookmark (progress notify) events should
// be delivered to the users. The option is ignored for non-watch requests.
ProgressNotify bool
}
| staging/src/k8s.io/apiserver/pkg/storage/interfaces.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0014449828304350376,
0.00026223555323667824,
0.00016291727661155164,
0.00016907815006561577,
0.00028158852364867926
] |
{
"id": 12,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\ttrace.Step(\"Retry value restored\")\n",
"\t\t\tmustCheckData = false\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tputResp := txnResp.Responses[0].GetResponsePut()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 414
} | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// InterfaceIPConfigurationsClient is the network Client
type InterfaceIPConfigurationsClient struct {
BaseClient
}
// NewInterfaceIPConfigurationsClient creates an instance of the InterfaceIPConfigurationsClient client.
func NewInterfaceIPConfigurationsClient(subscriptionID string) InterfaceIPConfigurationsClient {
return NewInterfaceIPConfigurationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewInterfaceIPConfigurationsClientWithBaseURI creates an instance of the InterfaceIPConfigurationsClient client
// using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign
// clouds, Azure stack).
func NewInterfaceIPConfigurationsClientWithBaseURI(baseURI string, subscriptionID string) InterfaceIPConfigurationsClient {
return InterfaceIPConfigurationsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// Get gets the specified network interface ip configuration.
// Parameters:
// resourceGroupName - the name of the resource group.
// networkInterfaceName - the name of the network interface.
// IPConfigurationName - the name of the ip configuration name.
func (client InterfaceIPConfigurationsClient) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, IPConfigurationName string) (result InterfaceIPConfiguration, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceIPConfigurationsClient.Get")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.GetPreparer(ctx, resourceGroupName, networkInterfaceName, IPConfigurationName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "Get", nil, "Failure preparing request")
return
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "Get", resp, "Failure sending request")
return
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "Get", resp, "Failure responding to request")
}
return
}
// GetPreparer prepares the Get request.
func (client InterfaceIPConfigurationsClient) GetPreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string, IPConfigurationName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"ipConfigurationName": autorest.Encode("path", IPConfigurationName),
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client InterfaceIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client InterfaceIPConfigurationsClient) GetResponder(resp *http.Response) (result InterfaceIPConfiguration, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List get all ip configurations in a network interface.
// Parameters:
// resourceGroupName - the name of the resource group.
// networkInterfaceName - the name of the network interface.
func (client InterfaceIPConfigurationsClient) List(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result InterfaceIPConfigurationListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceIPConfigurationsClient.List")
defer func() {
sc := -1
if result.iiclr.Response.Response != nil {
sc = result.iiclr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, networkInterfaceName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.iiclr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", resp, "Failure sending request")
return
}
result.iiclr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client InterfaceIPConfigurationsClient) ListPreparer(ctx context.Context, resourceGroupName string, networkInterfaceName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"networkInterfaceName": autorest.Encode("path", networkInterfaceName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2019-06-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client InterfaceIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client InterfaceIPConfigurationsClient) ListResponder(resp *http.Response) (result InterfaceIPConfigurationListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client InterfaceIPConfigurationsClient) listNextResults(ctx context.Context, lastResults InterfaceIPConfigurationListResult) (result InterfaceIPConfigurationListResult, err error) {
req, err := lastResults.interfaceIPConfigurationListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.InterfaceIPConfigurationsClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client InterfaceIPConfigurationsClient) ListComplete(ctx context.Context, resourceGroupName string, networkInterfaceName string) (result InterfaceIPConfigurationListResultIterator, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/InterfaceIPConfigurationsClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.page, err = client.List(ctx, resourceGroupName, networkInterfaceName)
return
}
| vendor/github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network/interfaceipconfigurations.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.001950613223016262,
0.0002971550857182592,
0.00016224849969148636,
0.00017908676818478853,
0.00036998288123868406
] |
{
"id": 12,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\ttrace.Step(\"Retry value restored\")\n",
"\t\t\tmustCheckData = false\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tputResp := txnResp.Responses[0].GetResponsePut()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 414
} | // Copyright ©2016 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !noasm,!appengine,!safe
#include "textflag.h"
// MOVSHDUP X3, X2
#define MOVSHDUP_X3_X2 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xD3
// MOVSLDUP X3, X3
#define MOVSLDUP_X3_X3 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xDB
// ADDSUBPS X2, X3
#define ADDSUBPS_X2_X3 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xDA
// MOVSHDUP X5, X4
#define MOVSHDUP_X5_X4 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xE5
// MOVSLDUP X5, X5
#define MOVSLDUP_X5_X5 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xED
// ADDSUBPS X4, X5
#define ADDSUBPS_X4_X5 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xEC
// MOVSHDUP X7, X6
#define MOVSHDUP_X7_X6 BYTE $0xF3; BYTE $0x0F; BYTE $0x16; BYTE $0xF7
// MOVSLDUP X7, X7
#define MOVSLDUP_X7_X7 BYTE $0xF3; BYTE $0x0F; BYTE $0x12; BYTE $0xFF
// ADDSUBPS X6, X7
#define ADDSUBPS_X6_X7 BYTE $0xF2; BYTE $0x0F; BYTE $0xD0; BYTE $0xFE
// MOVSHDUP X9, X8
#define MOVSHDUP_X9_X8 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x16; BYTE $0xC1
// MOVSLDUP X9, X9
#define MOVSLDUP_X9_X9 BYTE $0xF3; BYTE $0x45; BYTE $0x0F; BYTE $0x12; BYTE $0xC9
// ADDSUBPS X8, X9
#define ADDSUBPS_X8_X9 BYTE $0xF2; BYTE $0x45; BYTE $0x0F; BYTE $0xD0; BYTE $0xC8
// func AxpyUnitaryTo(dst []complex64, alpha complex64, x, y []complex64)
TEXT ·AxpyUnitaryTo(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DI // DI = &dst
MOVQ x_base+32(FP), SI // SI = &x
MOVQ y_base+56(FP), DX // DX = &y
MOVQ x_len+40(FP), CX
CMPQ y_len+64(FP), CX // CX = min( len(x), len(y), len(dst) )
CMOVQLE y_len+64(FP), CX
CMPQ dst_len+8(FP), CX
CMOVQLE dst_len+8(FP), CX
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
MOVSD alpha+24(FP), X0 // X0 = { 0, 0, imag(a), real(a) }
SHUFPD $0, X0, X0 // X0 = { imag(a), real(a), imag(a), real(a) }
MOVAPS X0, X1
SHUFPS $0x11, X1, X1 // X1 = { real(a), imag(a), real(a), imag(a) }
XORQ AX, AX // i = 0
MOVQ DX, BX // Align on 16-byte boundary for ADDPS
ANDQ $15, BX // BX = &y & 15
JZ caxy_no_trim // if BX == 0 { goto caxy_no_trim }
MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) }
MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]), real(a)*real(x[i]) - imag(a)*imag(x[i]) }
ADDSUBPS_X2_X3
MOVSD (DX)(AX*8), X4 // X3 += y[i]
ADDPS X4, X3
MOVSD X3, (DI)(AX*8) // dst[i] = X3
INCQ AX // i++
DECQ CX // --CX
JZ caxy_tail // if BX == 0 { goto caxy_tail }
caxy_no_trim:
MOVAPS X0, X10 // Copy X0 and X1 for pipelineing
MOVAPS X1, X11
MOVQ CX, BX
ANDQ $7, CX // CX = n % 8
SHRQ $3, BX // BX = floor( n / 8 )
JZ caxy_tail // if BX == 0 { goto caxy_tail }
caxy_loop:
// X_i = { imag(x[i]), real(x[i]), imag(x[i+1]), real(x[i+1]) }
MOVUPS (SI)(AX*8), X3
MOVUPS 16(SI)(AX*8), X5
MOVUPS 32(SI)(AX*8), X7
MOVUPS 48(SI)(AX*8), X9
// X_(i-1) = { imag(x[i]), imag(x[i]), imag(x[i]+1), imag(x[i]+1) }
MOVSHDUP_X3_X2
MOVSHDUP_X5_X4
MOVSHDUP_X7_X6
MOVSHDUP_X9_X8
// X_i = { real(x[i]), real(x[i]), real(x[i+1]), real(x[i+1]) }
MOVSLDUP_X3_X3
MOVSLDUP_X5_X5
MOVSLDUP_X7_X7
MOVSLDUP_X9_X9
// X_i = { imag(a) * real(x[i]), real(a) * real(x[i]),
// imag(a) * real(x[i+1]), real(a) * real(x[i+1]) }
// X_(i-1) = { real(a) * imag(x[i]), imag(a) * imag(x[i]),
// real(a) * imag(x[i+1]), imag(a) * imag(x[i+1]) }
MULPS X1, X2
MULPS X0, X3
MULPS X11, X4
MULPS X10, X5
MULPS X1, X6
MULPS X0, X7
MULPS X11, X8
MULPS X10, X9
// X_i = {
// imag(result[i]): imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(result[i]): real(a)*real(x[i]) - imag(a)*imag(x[i]),
// imag(result[i+1]): imag(a)*real(x[i+1]) + real(a)*imag(x[i+1]),
// real(result[i+1]): real(a)*real(x[i+1]) - imag(a)*imag(x[i+1]),
// }
ADDSUBPS_X2_X3
ADDSUBPS_X4_X5
ADDSUBPS_X6_X7
ADDSUBPS_X8_X9
// X_i = { imag(result[i]) + imag(y[i]), real(result[i]) + real(y[i]),
// imag(result[i+1]) + imag(y[i+1]), real(result[i+1]) + real(y[i+1]) }
ADDPS (DX)(AX*8), X3
ADDPS 16(DX)(AX*8), X5
ADDPS 32(DX)(AX*8), X7
ADDPS 48(DX)(AX*8), X9
MOVUPS X3, (DI)(AX*8) // y[i:i+1] = X_i
MOVUPS X5, 16(DI)(AX*8)
MOVUPS X7, 32(DI)(AX*8)
MOVUPS X9, 48(DI)(AX*8)
ADDQ $8, AX // i += 8
DECQ BX // --BX
JNZ caxy_loop // } while BX > 0
CMPQ CX, $0 // if CX == 0 { return }
JE caxy_end
caxy_tail: // do {
MOVSD (SI)(AX*8), X3 // X3 = { imag(x[i]), real(x[i]) }
MOVSHDUP_X3_X2 // X2 = { imag(x[i]), imag(x[i]) }
MOVSLDUP_X3_X3 // X3 = { real(x[i]), real(x[i]) }
MULPS X1, X2 // X2 = { real(a) * imag(x[i]), imag(a) * imag(x[i]) }
MULPS X0, X3 // X3 = { imag(a) * real(x[i]), real(a) * real(x[i]) }
// X3 = { imag(a)*real(x[i]) + real(a)*imag(x[i]),
// real(a)*real(x[i]) - imag(a)*imag(x[i]) }
ADDSUBPS_X2_X3
MOVSD (DX)(AX*8), X4 // X3 += y[i]
ADDPS X4, X3
MOVSD X3, (DI)(AX*8) // y[i] = X3
INCQ AX // ++i
LOOP caxy_tail // } while --CX > 0
caxy_end:
RET
| vendor/gonum.org/v1/gonum/internal/asm/c64/axpyunitaryto_amd64.s | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0002260178589494899,
0.00017313679563812912,
0.00016144201799761504,
0.00016900553600862622,
0.000014727054804097861
] |
{
"id": 12,
"code_window": [
"\t\t\tif err != nil {\n",
"\t\t\t\treturn err\n",
"\t\t\t}\n",
"\t\t\ttrace.Step(\"Retry value restored\")\n",
"\t\t\tmustCheckData = false\n",
"\t\t\tcontinue\n",
"\t\t}\n",
"\t\tputResp := txnResp.Responses[0].GetResponsePut()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\torigStateIsCurrent = true\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go",
"type": "replace",
"edit_start_line_idx": 414
} | #!/usr/bin/env bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script checks the description format of help message of kubectl command
# is valid or not. And this checking is done for all kubectl sub-commands.
# Usage: `hack/verify-cli-conventions.sh`.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
BINS=(
cmd/clicheck
)
make -C "${KUBE_ROOT}" WHAT="${BINS[*]}"
clicheck=$(kube::util::find-binary "clicheck")
if ! output=$($clicheck 2>&1)
then
echo "$output"
echo
echo "FAILURE: CLI is not following one or more required conventions."
exit 1
else
echo "$output"
echo
echo "SUCCESS: CLI is following all tested conventions."
fi
| hack/verify-cli-conventions.sh | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0001762464817147702,
0.000172754458617419,
0.00016428834351245314,
0.00017567520262673497,
0.000004537071163213113
] |
{
"id": 13,
"code_window": [
"\t// the current contents of the object when deciding how the update object should look.\n",
"\t// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false\n",
"\t// or zero value in 'ptrToType' parameter otherwise.\n",
"\t// If the object to update has the same value as previous, it won't do any update\n",
"\t// but will return the object in 'ptrToType' parameter.\n",
"\t// If 'suggestion' is non-nil, it can be used as a suggestion about the current version\n",
"\t// of the object to avoid read operation from storage to get it. However, the\n",
"\t// implementations have to retry in case suggestion is stale.\n",
"\t//\n",
"\t// Example:\n",
"\t//\n",
"\t// s := /* implementation of Interface */\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the\n",
"\t// current version of the object to avoid read operation from storage to get it.\n",
"\t// However, the implementations have to retry in case suggestion is stale.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 222
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded from, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// parseFrom transforms an encoded predicate from into a versioned struct.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
// We need to make sure the key ends with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while prefix "/a/" will return only "/a/b", which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
// getResp.Count includes objects that do not match pred, so the count would be
// inaccurate for non-empty selectors; instead of returning an inaccurate count
// we return nil and only set remainingItemCount when the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity, decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty, maxCapacity will be used as the size to grow to.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
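// Illustrative behaviour (capacities are hypothetical): with a slice of
// capacity 10, growSlice(v, 2048, 500) pre-allocates capacity 500, while
// growSlice(v, 2048, 5000) caps the pre-allocation at 2048 and lets append
// handle any further growth.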
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on the given ttl.
// ttl: if ttl is non-zero, the key will be attached to a lease with a ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
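// Illustrative calls (a sketch): ttlOpts(ctx, 0) returns no options, so the key
// is written without a lease; ttlOpts(ctx, 3600) returns a WithLease option
// bound to a lease of roughly an hour obtained from the lease manager.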
// validateMinimumResourceVersion returns a 'too large resource version' error when the provided
// minimumResourceVersion is greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
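// For example (revisions are hypothetical): if a client asked for
// resourceVersion "100" but the etcd response header revision is only 90,
// validateMinimumResourceVersion returns a TooLargeResourceVersion error.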
// decode decodes a byte value into an object and also sets the object's resource version to rev.
// On success, objPtr will be set to the decoded object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
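// notFound returns a comparison that holds only when the key does not exist:
// etcd reports a ModRevision of 0 for a key that has never been created (or
// has been deleted), so this is typically used as a transaction guard for
// create operations.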
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.03591546788811684,
0.0014180648140609264,
0.00016116394544951618,
0.0001712876110104844,
0.005436722654849291
] |
{
"id": 13,
"code_window": [
"\t// the current contents of the object when deciding how the update object should look.\n",
"\t// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false\n",
"\t// or zero value in 'ptrToType' parameter otherwise.\n",
"\t// If the object to update has the same value as previous, it won't do any update\n",
"\t// but will return the object in 'ptrToType' parameter.\n",
"\t// If 'suggestion' is non-nil, it can be used as a suggestion about the current version\n",
"\t// of the object to avoid read operation from storage to get it. However, the\n",
"\t// implementations have to retry in case suggestion is stale.\n",
"\t//\n",
"\t// Example:\n",
"\t//\n",
"\t// s := /* implementation of Interface */\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the\n",
"\t// current version of the object to avoid read operation from storage to get it.\n",
"\t// However, the implementations have to retry in case suggestion is stale.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 222
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resttest
import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"testing"
"time"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
)
// TODO(apelisse): Tests in this file should be more hermetic by always
// removing objects that they create. That would avoid name collisions.
type Tester struct {
*testing.T
storage rest.Storage
clusterScope bool
createOnUpdate bool
generatesName bool
returnDeletedObject bool
namer func(int) string
}
func New(t *testing.T, storage rest.Storage) *Tester {
return &Tester{
T: t,
storage: storage,
namer: defaultNamer,
}
}
func defaultNamer(i int) string {
return fmt.Sprintf("foo%d", i)
}
// Namer allows providing a custom name maker
// By default "foo%d" is used
func (t *Tester) Namer(namer func(int) string) *Tester {
t.namer = namer
return t
}
func (t *Tester) ClusterScope() *Tester {
t.clusterScope = true
return t
}
func (t *Tester) AllowCreateOnUpdate() *Tester {
t.createOnUpdate = true
return t
}
func (t *Tester) GeneratesName() *Tester {
t.generatesName = true
return t
}
func (t *Tester) ReturnDeletedObject() *Tester {
t.returnDeletedObject = true
return t
}
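// Illustrative construction (a sketch; the storage variable and namer are
// assumptions, not part of the harness):
//
//	tester := New(t, storage).ClusterScope().ReturnDeletedObject()
//	tester.Namer(func(i int) string { return fmt.Sprintf("pod%d", i) })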
// TestNamespace returns the namespace that will be used when creating contexts.
// Returns NamespaceNone for cluster-scoped objects.
func (t *Tester) TestNamespace() string {
if t.clusterScope {
return metav1.NamespaceNone
}
return "test"
}
// TestContext returns a namespaced context that will be used when making storage calls.
// Namespace is determined by TestNamespace()
func (t *Tester) TestContext() context.Context {
if t.clusterScope {
return genericapirequest.NewContext()
}
return genericapirequest.WithNamespace(genericapirequest.NewContext(), t.TestNamespace())
}
func (t *Tester) getObjectMetaOrFail(obj runtime.Object) metav1.Object {
objMeta, err := meta.Accessor(obj)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, obj)
}
return objMeta
}
func (t *Tester) setObjectMeta(obj runtime.Object, name string) {
meta := t.getObjectMetaOrFail(obj)
meta.SetName(name)
if t.clusterScope {
meta.SetNamespace(metav1.NamespaceNone)
} else {
meta.SetNamespace(genericapirequest.NamespaceValue(t.TestContext()))
}
meta.SetGenerateName("")
meta.SetGeneration(1)
}
type AssignFunc func([]runtime.Object) []runtime.Object
type EmitFunc func(runtime.Object, string) error
type GetFunc func(context.Context, runtime.Object) (runtime.Object, error)
type InitWatchFunc func()
type InjectErrFunc func(err error)
type IsErrorFunc func(err error) bool
type CreateFunc func(context.Context, runtime.Object) error
type SetRVFunc func(uint64)
type UpdateFunc func(runtime.Object) runtime.Object
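// An illustrative CreateFunc backed by a rest.Creater (a sketch; "storage" is
// an assumption):
//
//	createFn := func(ctx context.Context, obj runtime.Object) error {
//		_, err := storage.Create(ctx, obj, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
//		return err
//	}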
// Test creating an object.
func (t *Tester) TestCreate(valid runtime.Object, createFn CreateFunc, getFn GetFunc, invalid ...runtime.Object) {
dryRunOpts := metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}
opts := metav1.CreateOptions{}
t.testCreateHasMetadata(valid.DeepCopyObject())
if !t.generatesName {
t.testCreateGeneratesName(valid.DeepCopyObject())
}
t.testCreateDryRun(valid.DeepCopyObject(), getFn)
t.testCreateDryRunEquals(valid.DeepCopyObject())
t.testCreateEquals(valid.DeepCopyObject(), getFn)
t.testCreateAlreadyExisting(valid.DeepCopyObject(), createFn, dryRunOpts)
t.testCreateAlreadyExisting(valid.DeepCopyObject(), createFn, opts)
if t.clusterScope {
t.testCreateDiscardsObjectNamespace(valid.DeepCopyObject(), dryRunOpts)
t.testCreateDiscardsObjectNamespace(valid.DeepCopyObject(), opts)
t.testCreateIgnoresContextNamespace(valid.DeepCopyObject(), dryRunOpts)
t.testCreateIgnoresContextNamespace(valid.DeepCopyObject(), opts)
t.testCreateIgnoresMismatchedNamespace(valid.DeepCopyObject(), dryRunOpts)
t.testCreateIgnoresMismatchedNamespace(valid.DeepCopyObject(), opts)
t.testCreateResetsUserData(valid.DeepCopyObject(), dryRunOpts)
t.testCreateResetsUserData(valid.DeepCopyObject(), opts)
} else {
t.testCreateRejectsMismatchedNamespace(valid.DeepCopyObject(), dryRunOpts)
t.testCreateRejectsMismatchedNamespace(valid.DeepCopyObject(), opts)
}
t.testCreateInvokesValidation(dryRunOpts, invalid...)
t.testCreateInvokesValidation(opts, invalid...)
t.testCreateValidatesNames(valid.DeepCopyObject(), dryRunOpts)
t.testCreateValidatesNames(valid.DeepCopyObject(), opts)
t.testCreateIgnoreClusterName(valid.DeepCopyObject(), dryRunOpts)
t.testCreateIgnoreClusterName(valid.DeepCopyObject(), opts)
}
// Test updating an object.
func (t *Tester) TestUpdate(valid runtime.Object, createFn CreateFunc, getFn GetFunc, updateFn UpdateFunc, invalidUpdateFn ...UpdateFunc) {
dryRunOpts := metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}
opts := metav1.UpdateOptions{}
t.testUpdateEquals(valid.DeepCopyObject(), createFn, getFn, updateFn)
t.testUpdateFailsOnVersionTooOld(valid.DeepCopyObject(), createFn, getFn)
t.testUpdateOnNotFound(valid.DeepCopyObject(), dryRunOpts)
t.testUpdateOnNotFound(valid.DeepCopyObject(), opts)
if !t.clusterScope {
t.testUpdateRejectsMismatchedNamespace(valid.DeepCopyObject(), createFn, getFn)
}
t.testUpdateInvokesValidation(valid.DeepCopyObject(), createFn, invalidUpdateFn...)
t.testUpdateWithWrongUID(valid.DeepCopyObject(), createFn, getFn, dryRunOpts)
t.testUpdateWithWrongUID(valid.DeepCopyObject(), createFn, getFn, opts)
t.testUpdateRetrievesOldObject(valid.DeepCopyObject(), createFn, getFn)
t.testUpdatePropagatesUpdatedObjectError(valid.DeepCopyObject(), createFn, getFn, dryRunOpts)
t.testUpdatePropagatesUpdatedObjectError(valid.DeepCopyObject(), createFn, getFn, opts)
t.testUpdateIgnoreGenerationUpdates(valid.DeepCopyObject(), createFn, getFn)
t.testUpdateIgnoreClusterName(valid.DeepCopyObject(), createFn, getFn)
}
// Test deleting an object.
func (t *Tester) TestDelete(valid runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
dryRunOpts := metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}
opts := metav1.DeleteOptions{}
t.testDeleteNonExist(valid.DeepCopyObject(), dryRunOpts)
t.testDeleteNonExist(valid.DeepCopyObject(), opts)
t.testDeleteNoGraceful(valid.DeepCopyObject(), createFn, getFn, isNotFoundFn, true)
t.testDeleteNoGraceful(valid.DeepCopyObject(), createFn, getFn, isNotFoundFn, false)
t.testDeleteWithUID(valid.DeepCopyObject(), createFn, getFn, isNotFoundFn, dryRunOpts)
t.testDeleteWithUID(valid.DeepCopyObject(), createFn, getFn, isNotFoundFn, opts)
}
// Test gracefully deleting an object.
func (t *Tester) TestDeleteGraceful(valid runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
t.testDeleteDryRunGracefulHasdefault(valid.DeepCopyObject(), createFn, expectedGrace)
t.testDeleteGracefulHasDefault(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulWithValue(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulUsesZeroOnNil(valid.DeepCopyObject(), createFn, expectedGrace)
t.testDeleteGracefulExtend(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulShorten(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulImmediate(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
}
// Test getting object.
func (t *Tester) TestGet(valid runtime.Object) {
t.testGetFound(valid.DeepCopyObject())
t.testGetNotFound(valid.DeepCopyObject())
t.testGetMimatchedNamespace(valid.DeepCopyObject())
if !t.clusterScope {
t.testGetDifferentNamespace(valid.DeepCopyObject())
}
}
// Test listing objects.
func (t *Tester) TestList(valid runtime.Object, assignFn AssignFunc) {
t.testListNotFound(assignFn)
t.testListFound(valid.DeepCopyObject(), assignFn)
t.testListMatchLabels(valid.DeepCopyObject(), assignFn)
t.testListTableConversion(valid.DeepCopyObject(), assignFn)
}
// Test watching objects.
func (t *Tester) TestWatch(
valid runtime.Object, emitFn EmitFunc,
labelsPass, labelsFail []labels.Set, fieldsPass, fieldsFail []fields.Set, actions []string) {
t.testWatchLabels(valid.DeepCopyObject(), emitFn, labelsPass, labelsFail, actions)
t.testWatchFields(valid.DeepCopyObject(), emitFn, fieldsPass, fieldsFail, actions)
}
// =============================================================================
// Creation tests.
func (t *Tester) delete(ctx context.Context, obj runtime.Object) error {
objectMeta, err := meta.Accessor(obj)
if err != nil {
return err
}
deleter, ok := t.storage.(rest.GracefulDeleter)
if !ok {
return fmt.Errorf("Expected deleting storage, got %v", t.storage)
}
_, _, err = deleter.Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, nil)
return err
}
func (t *Tester) testCreateAlreadyExisting(obj runtime.Object, createFn CreateFunc, opts metav1.CreateOptions) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, foo)
_, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, &opts)
if !errors.IsAlreadyExists(err) {
t.Errorf("expected already exists err, got %v", err)
}
}
func (t *Tester) testCreateDryRun(obj runtime.Object, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
_, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, err = getFn(ctx, foo)
if !errors.IsNotFound(err) {
t.Errorf("Expected NotFound error, got '%v'", err)
}
}
func (t *Tester) testCreateDryRunEquals(obj runtime.Object) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
createdFake, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, &metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
created, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, created)
// Normalize fields that may legitimately differ between the dry-run and the
// real create (creation timestamp, resource version, UID) before comparing.
createdMeta := t.getObjectMetaOrFail(created)
createdFakeMeta := t.getObjectMetaOrFail(createdFake)
createdMeta.SetCreationTimestamp(createdFakeMeta.GetCreationTimestamp())
createdFakeMeta.SetResourceVersion("")
createdMeta.SetResourceVersion("")
createdMeta.SetUID(createdFakeMeta.GetUID())
if e, a := created, createdFake; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testCreateEquals(obj runtime.Object, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
created, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, created)
got, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Set resource version which might be unset in created object.
createdMeta := t.getObjectMetaOrFail(created)
gotMeta := t.getObjectMetaOrFail(got)
createdMeta.SetResourceVersion(gotMeta.GetResourceVersion())
if e, a := created, got; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testCreateDiscardsObjectNamespace(valid runtime.Object, opts metav1.CreateOptions) {
objectMeta := t.getObjectMetaOrFail(valid)
// Ignore non-empty namespace in object meta
objectMeta.SetNamespace("not-default")
// Ideally, we'd get an error back here, but at least verify the namespace wasn't persisted
created, err := t.storage.(rest.Creater).Create(t.TestContext(), valid.DeepCopyObject(), rest.ValidateAllObjectFunc, &opts)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(t.TestContext(), created)
createdObjectMeta := t.getObjectMetaOrFail(created)
if createdObjectMeta.GetNamespace() != metav1.NamespaceNone {
t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.GetNamespace())
}
}
func (t *Tester) testCreateGeneratesName(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetName("")
objectMeta.SetGenerateName("test-")
created, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(t.TestContext(), created)
if objectMeta.GetName() == "test-" || !strings.HasPrefix(objectMeta.GetName(), "test-") {
t.Errorf("unexpected name: %#v", valid)
}
}
func (t *Tester) testCreateHasMetadata(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetName(t.namer(1))
objectMeta.SetNamespace(t.TestNamespace())
obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if obj == nil {
t.Fatalf("Unexpected object from result: %#v", obj)
}
defer t.delete(t.TestContext(), obj)
if !metav1.HasObjectMetaSystemFieldValues(objectMeta) {
t.Errorf("storage did not populate object meta field values")
}
}
func (t *Tester) testCreateIgnoresContextNamespace(valid runtime.Object, opts metav1.CreateOptions) {
// Ignore non-empty namespace in context
ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "not-default2")
// Ideally, we'd get an error back here, but at least verify the namespace wasn't persisted
created, err := t.storage.(rest.Creater).Create(ctx, valid.DeepCopyObject(), rest.ValidateAllObjectFunc, &opts)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(ctx, created)
createdObjectMeta := t.getObjectMetaOrFail(created)
if createdObjectMeta.GetNamespace() != metav1.NamespaceNone {
t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.GetNamespace())
}
}
func (t *Tester) testCreateIgnoresMismatchedNamespace(valid runtime.Object, opts metav1.CreateOptions) {
objectMeta := t.getObjectMetaOrFail(valid)
// Ignore non-empty namespace in object meta
objectMeta.SetNamespace("not-default")
ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "not-default2")
// Ideally, we'd get an error back here, but at least verify the namespace wasn't persisted
created, err := t.storage.(rest.Creater).Create(ctx, valid.DeepCopyObject(), rest.ValidateAllObjectFunc, &opts)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(ctx, created)
createdObjectMeta := t.getObjectMetaOrFail(created)
if createdObjectMeta.GetNamespace() != metav1.NamespaceNone {
t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.GetNamespace())
}
}
func (t *Tester) testCreateValidatesNames(valid runtime.Object, opts metav1.CreateOptions) {
for _, invalidName := range path.NameMayNotBe {
objCopy := valid.DeepCopyObject()
objCopyMeta := t.getObjectMetaOrFail(objCopy)
objCopyMeta.SetName(invalidName)
ctx := t.TestContext()
_, err := t.storage.(rest.Creater).Create(ctx, objCopy, rest.ValidateAllObjectFunc, &opts)
if !errors.IsInvalid(err) {
t.Errorf("%s: Expected to get an invalid resource error, got '%v'", invalidName, err)
}
}
for _, invalidSuffix := range path.NameMayNotContain {
objCopy := valid.DeepCopyObject()
objCopyMeta := t.getObjectMetaOrFail(objCopy)
objCopyMeta.SetName(objCopyMeta.GetName() + invalidSuffix)
ctx := t.TestContext()
_, err := t.storage.(rest.Creater).Create(ctx, objCopy, rest.ValidateAllObjectFunc, &opts)
if !errors.IsInvalid(err) {
t.Errorf("%s: Expected to get an invalid resource error, got '%v'", invalidSuffix, err)
}
}
}
func (t *Tester) testCreateInvokesValidation(opts metav1.CreateOptions, invalid ...runtime.Object) {
for i, obj := range invalid {
ctx := t.TestContext()
_, err := t.storage.(rest.Creater).Create(ctx, obj, rest.ValidateAllObjectFunc, &opts)
if !errors.IsInvalid(err) {
t.Errorf("%d: Expected to get an invalid resource error, got %v", i, err)
}
}
}
func (t *Tester) testCreateRejectsMismatchedNamespace(valid runtime.Object, opts metav1.CreateOptions) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetNamespace("not-default")
_, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !strings.Contains(err.Error(), "does not match the namespace sent on the request") {
t.Errorf("Expected 'does not match the namespace sent on the request' error, got '%v'", err.Error())
}
}
func (t *Tester) testCreateResetsUserData(valid runtime.Object, opts metav1.CreateOptions) {
objectMeta := t.getObjectMetaOrFail(valid)
now := metav1.Now()
objectMeta.SetUID("bad-uid")
objectMeta.SetCreationTimestamp(now)
obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, &opts)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if obj == nil {
t.Fatalf("Unexpected object from result: %#v", obj)
}
defer t.delete(t.TestContext(), obj)
if objectMeta.GetUID() == "bad-uid" || objectMeta.GetCreationTimestamp() == now {
t.Errorf("ObjectMeta did not reset basic fields: %#v", objectMeta)
}
}
func (t *Tester) testCreateIgnoreClusterName(valid runtime.Object, opts metav1.CreateOptions) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetName(t.namer(3))
objectMeta.SetClusterName("clustername-to-ignore")
obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid.DeepCopyObject(), rest.ValidateAllObjectFunc, &opts)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(t.TestContext(), obj)
createdObjectMeta := t.getObjectMetaOrFail(obj)
if len(createdObjectMeta.GetClusterName()) != 0 {
t.Errorf("Expected empty clusterName on created object, got '%v'", createdObjectMeta.GetClusterName())
}
}
// =============================================================================
// Update tests.
func (t *Tester) testUpdateEquals(obj runtime.Object, createFn CreateFunc, getFn GetFunc, updateFn UpdateFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
toUpdate, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
toUpdate = updateFn(toUpdate)
toUpdateMeta := t.getObjectMetaOrFail(toUpdate)
updated, created, err := t.storage.(rest.Updater).Update(ctx, toUpdateMeta.GetName(), rest.DefaultUpdatedObjectInfo(toUpdate), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if created {
t.Errorf("unexpected creation")
}
got, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Set resource version which might be unset in created object.
updatedMeta := t.getObjectMetaOrFail(updated)
gotMeta := t.getObjectMetaOrFail(got)
updatedMeta.SetResourceVersion(gotMeta.GetResourceVersion())
if e, a := updated, got; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(3))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
older := storedFoo.DeepCopyObject()
olderMeta := t.getObjectMetaOrFail(older)
olderMeta.SetResourceVersion("1")
_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.GetName(), rest.DefaultUpdatedObjectInfo(older), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !errors.IsConflict(err) {
t.Errorf("Expected Conflict error, got '%v'", err)
}
}
func (t *Tester) testUpdateInvokesValidation(obj runtime.Object, createFn CreateFunc, invalidUpdateFn ...UpdateFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(4))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, update := range invalidUpdateFn {
toUpdate := update(foo.DeepCopyObject())
toUpdateMeta := t.getObjectMetaOrFail(toUpdate)
got, created, err := t.storage.(rest.Updater).Update(t.TestContext(), toUpdateMeta.GetName(), rest.DefaultUpdatedObjectInfo(toUpdate), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if got != nil || created {
t.Errorf("expected nil object and no creation for object: %v", toUpdate)
}
if !errors.IsInvalid(err) && !errors.IsBadRequest(err) {
t.Errorf("expected invalid or bad request error, got %v", err)
}
}
}
func (t *Tester) testUpdateWithWrongUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, opts metav1.UpdateOptions) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(5))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetUID(types.UID("UID0000"))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, foo)
objectMeta.SetUID(types.UID("UID1111"))
obj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.GetName(), rest.DefaultUpdatedObjectInfo(foo), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &opts)
if created || obj != nil {
t.Errorf("expected nil object and no creation for object: %v", foo)
}
if err == nil || !errors.IsConflict(err) {
t.Errorf("unexpected error: %v", err)
}
}
func (t *Tester) testUpdateRetrievesOldObject(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(6))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetAnnotations(map[string]string{"A": "1"})
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
return
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
storedFooWithUpdates := storedFoo.DeepCopyObject()
objectMeta = t.getObjectMetaOrFail(storedFooWithUpdates)
objectMeta.SetAnnotations(map[string]string{"A": "2"})
// Make sure a custom transform is called, and sees the expected updatedObject and oldObject
// This tests the mechanism used to pass the old and new object to admission
calledUpdatedObject := 0
noopTransform := func(_ context.Context, updatedObject runtime.Object, oldObject runtime.Object) (runtime.Object, error) {
if !reflect.DeepEqual(storedFoo, oldObject) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", storedFoo, oldObject)
}
if !reflect.DeepEqual(storedFooWithUpdates, updatedObject) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", storedFooWithUpdates, updatedObject)
}
calledUpdatedObject++
return updatedObject, nil
}
updatedObj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.GetName(), rest.DefaultUpdatedObjectInfo(storedFooWithUpdates, noopTransform), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
if created {
t.Errorf("expected no creation for object")
return
}
if updatedObj == nil {
t.Errorf("expected non-nil object from update")
return
}
if calledUpdatedObject != 1 {
t.Errorf("expected UpdatedObject() to be called 1 time, was called %d", calledUpdatedObject)
return
}
}
func (t *Tester) testUpdatePropagatesUpdatedObjectError(obj runtime.Object, createFn CreateFunc, getFn GetFunc, opts metav1.UpdateOptions) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
name := t.namer(7)
t.setObjectMeta(foo, name)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
return
}
defer t.delete(ctx, foo)
// Make sure our transform is called, and sees the expected updatedObject and oldObject
propagateErr := fmt.Errorf("custom updated object error for %v", foo)
noopTransform := func(_ context.Context, updatedObject runtime.Object, oldObject runtime.Object) (runtime.Object, error) {
return nil, propagateErr
}
_, _, err := t.storage.(rest.Updater).Update(ctx, name, rest.DefaultUpdatedObjectInfo(foo, noopTransform), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &opts)
if err != propagateErr {
t.Errorf("expected propagated error, got %#v", err)
}
}
func (t *Tester) testUpdateIgnoreGenerationUpdates(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
name := t.namer(8)
t.setObjectMeta(foo, name)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
older := storedFoo.DeepCopyObject()
olderMeta := t.getObjectMetaOrFail(older)
olderMeta.SetGeneration(2)
_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.GetName(), rest.DefaultUpdatedObjectInfo(older), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
updatedFoo, err := getFn(ctx, older)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if exp, got := int64(1), t.getObjectMetaOrFail(updatedFoo).GetGeneration(); exp != got {
t.Errorf("Unexpected generation update: expected %d, got %d", exp, got)
}
}
func (t *Tester) testUpdateOnNotFound(obj runtime.Object, opts metav1.UpdateOptions) {
t.setObjectMeta(obj, t.namer(0))
_, created, err := t.storage.(rest.Updater).Update(t.TestContext(), t.namer(0), rest.DefaultUpdatedObjectInfo(obj), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &opts)
if t.createOnUpdate {
if err != nil {
t.Errorf("creation allowed on updated, but got an error: %v", err)
}
if !created {
t.Errorf("creation allowed on update, but object not created")
}
} else {
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !errors.IsNotFound(err) {
t.Errorf("Expected NotFound error, got '%v'", err)
}
}
}
func (t *Tester) testUpdateRejectsMismatchedNamespace(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(storedFoo)
objectMeta.SetName(t.namer(1))
objectMeta.SetNamespace("not-default")
obj, updated, err := t.storage.(rest.Updater).Update(t.TestContext(), "foo1", rest.DefaultUpdatedObjectInfo(storedFoo), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if obj != nil || updated {
t.Errorf("expected nil object and not updated")
}
if err == nil {
t.Errorf("expected an error, but didn't get one")
} else if !strings.Contains(err.Error(), "does not match the namespace sent on the request") {
t.Errorf("expected 'does not match the namespace sent on the request' error, got '%v'", err.Error())
}
}
func (t *Tester) testUpdateIgnoreClusterName(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
name := t.namer(9)
t.setObjectMeta(foo, name)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
older := storedFoo.DeepCopyObject()
olderMeta := t.getObjectMetaOrFail(older)
olderMeta.SetClusterName("clustername-to-ignore")
_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.GetName(), rest.DefaultUpdatedObjectInfo(older), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
updatedFoo, err := getFn(ctx, older)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if clusterName := t.getObjectMetaOrFail(updatedFoo).GetClusterName(); len(clusterName) != 0 {
t.Errorf("Unexpected clusterName update: expected empty, got %v", clusterName)
}
}
// =============================================================================
// Deletion tests.
func (t *Tester) testDeleteNoGraceful(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, dryRun bool) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, foo)
objectMeta := t.getObjectMetaOrFail(foo)
opts := metav1.NewDeleteOptions(10)
if dryRun {
opts.DryRun = []string{metav1.DryRunAll}
}
obj, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, opts)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasDeleted {
t.Errorf("unexpected, object %s should have been deleted immediately", objectMeta.GetName())
}
if !t.returnDeletedObject {
if status, ok := obj.(*metav1.Status); !ok {
t.Errorf("expected status of delete, got %v", status)
} else if status.Status != metav1.StatusSuccess {
t.Errorf("expected success, got: %v", status.Status)
}
}
_, err = getFn(ctx, foo)
if !dryRun && (err == nil || !isNotFoundFn(err)) {
t.Errorf("unexpected error: %v", err)
} else if dryRun && isNotFoundFn(err) {
t.Error("object should not have been removed in dry-run")
}
}
func (t *Tester) testDeleteNonExist(obj runtime.Object, opts metav1.DeleteOptions) {
objectMeta := t.getObjectMetaOrFail(obj)
_, _, err := t.storage.(rest.GracefulDeleter).Delete(t.TestContext(), objectMeta.GetName(), rest.ValidateAllObjectFunc, &opts)
if err == nil || !errors.IsNotFound(err) {
t.Errorf("unexpected error: %v", err)
}
}
// This tests the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetUID(types.UID("UID0000"))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
opts.Preconditions = metav1.NewPreconditionDeleteOptions("UID1111").Preconditions
_, _, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, &opts)
if err == nil || !errors.IsConflict(err) {
t.Errorf("unexpected error: %v", err)
}
obj, _, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewPreconditionDeleteOptions("UID0000"))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !t.returnDeletedObject {
if status, ok := obj.(*metav1.Status); !ok {
t.Errorf("expected status of delete, got %v", status)
} else if status.Status != metav1.StatusSuccess {
t.Errorf("expected success, got: %v", status.Status)
}
}
_, err = getFn(ctx, foo)
if err == nil || !isNotFoundFn(err) {
t.Errorf("unexpected error: %v", err)
}
}
// This tests the fast-fail path. We test that the precondition gets verified
// again before deleting the object in tests of pkg/storage/etcd.
func (t *Tester) testDeleteWithResourceVersion(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc, opts metav1.DeleteOptions) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetResourceVersion("RV0000")
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
opts.Preconditions = metav1.NewRVDeletionPrecondition("RV1111").Preconditions
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, &opts)
if err == nil || !errors.IsConflict(err) {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
obj, _, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewRVDeletionPrecondition("RV0000"))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !t.returnDeletedObject {
if status, ok := obj.(*metav1.Status); !ok {
t.Errorf("expected status of delete, got %v", status)
} else if status.Status != metav1.StatusSuccess {
t.Errorf("expected success, got: %v", status.Status)
}
}
_, err = getFn(ctx, foo)
if err == nil || !isNotFoundFn(err) {
t.Errorf("unexpected error: %v", err)
}
}
// =============================================================================
// Graceful Deletion tests.
func (t *Tester) testDeleteDryRunGracefulHasdefault(obj runtime.Object, createFn CreateFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
defer t.delete(ctx, foo)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
object, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
_, _, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, &metav1.DeleteOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Fatalf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace+2 {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(3))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
// second delete duration is ignored
_, wasDeleted, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, "foo4")
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
// second delete is immediate, resource is deleted
out, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasDeleted {
t.Errorf("unexpected, object %s should have been deleted immediately", objectMeta.GetName())
}
_, err = t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Errorf("unexpected error, object should be deleted immediately: %v", err)
}
objectMeta = t.getObjectMetaOrFail(out)
// the second delete is immediate (grace period 0), so the returned object's deletionGracePeriodSeconds should be 0.
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != 0 {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulUsesZeroOnNil(obj runtime.Object, createFn CreateFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(5))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasDeleted {
t.Errorf("unexpected, object %s should have been deleted immediately", objectMeta.GetName())
}
if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{}); !errors.IsNotFound(err) {
t.Errorf("unexpected error, object should not exist: %v", err)
}
}
// Regression test for bug discussed in #27539
func (t *Tester) testDeleteGracefulShorten(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(6))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
bigGrace := int64(time.Hour)
if expectedGrace > bigGrace {
bigGrace = 2 * expectedGrace
}
objectMeta := t.getObjectMetaOrFail(foo)
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(bigGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
object, err := getFn(ctx, foo)
if err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
deletionTimestamp := *objectMeta.GetDeletionTimestamp()
// second delete duration is ignored
_, wasDeleted, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), rest.ValidateAllObjectFunc, metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
object, err = t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil ||
*objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace || !objectMeta.GetDeletionTimestamp().Before(&deletionTimestamp) {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
}
// =============================================================================
// Get tests.
// testGetDifferentNamespace ensures same-name objects in different namespaces do not clash
func (t *Tester) testGetDifferentNamespace(obj runtime.Object) {
if t.clusterScope {
t.Fatalf("the test does not work in cluster-scope")
}
objMeta := t.getObjectMetaOrFail(obj)
objMeta.SetName(t.namer(5))
ctx1 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar3")
objMeta.SetNamespace(genericapirequest.NamespaceValue(ctx1))
_, err := t.storage.(rest.Creater).Create(ctx1, obj, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
ctx2 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar4")
objMeta.SetNamespace(genericapirequest.NamespaceValue(ctx2))
_, err = t.storage.(rest.Creater).Create(ctx2, obj, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
got1, err := t.storage.(rest.Getter).Get(ctx1, objMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
got1Meta := t.getObjectMetaOrFail(got1)
if got1Meta.GetName() != objMeta.GetName() {
t.Errorf("unexpected name of object: %#v, expected: %s", got1, objMeta.GetName())
}
if got1Meta.GetNamespace() != genericapirequest.NamespaceValue(ctx1) {
t.Errorf("unexpected namespace of object: %#v, expected: %s", got1, genericapirequest.NamespaceValue(ctx1))
}
got2, err := t.storage.(rest.Getter).Get(ctx2, objMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
got2Meta := t.getObjectMetaOrFail(got2)
if got2Meta.GetName() != objMeta.GetName() {
t.Errorf("unexpected name of object: %#v, expected: %s", got2, objMeta.GetName())
}
if got2Meta.GetNamespace() != genericapirequest.NamespaceValue(ctx2) {
t.Errorf("unexpected namespace of object: %#v, expected: %s", got2, genericapirequest.NamespaceValue(ctx2))
}
}
func (t *Tester) testGetFound(obj runtime.Object) {
ctx := t.TestContext()
t.setObjectMeta(obj, t.namer(1))
existing, err := t.storage.(rest.Creater).Create(ctx, obj, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
existingMeta := t.getObjectMetaOrFail(existing)
got, err := t.storage.(rest.Getter).Get(ctx, t.namer(1), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
gotMeta := t.getObjectMetaOrFail(got)
gotMeta.SetResourceVersion(existingMeta.GetResourceVersion())
if e, a := existing, got; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testGetMimatchedNamespace(obj runtime.Object) {
ctx1 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar1")
ctx2 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar2")
objMeta := t.getObjectMetaOrFail(obj)
objMeta.SetName(t.namer(4))
objMeta.SetNamespace(genericapirequest.NamespaceValue(ctx1))
_, err := t.storage.(rest.Creater).Create(ctx1, obj, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, err = t.storage.(rest.Getter).Get(ctx2, t.namer(4), &metav1.GetOptions{})
if t.clusterScope {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
} else {
if !errors.IsNotFound(err) {
t.Errorf("unexpected error returned: %#v", err)
}
}
}
func (t *Tester) testGetNotFound(obj runtime.Object) {
ctx := t.TestContext()
t.setObjectMeta(obj, t.namer(2))
_, err := t.storage.(rest.Creater).Create(ctx, obj, rest.ValidateAllObjectFunc, &metav1.CreateOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, err = t.storage.(rest.Getter).Get(ctx, t.namer(3), &metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Errorf("unexpected error returned: %#v", err)
}
}
// =============================================================================
// List tests.
func listToItems(listObj runtime.Object) ([]runtime.Object, error) {
v, err := conversion.EnforcePtr(listObj)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
items := v.FieldByName("Items")
if !items.IsValid() {
return nil, fmt.Errorf("unexpected Items field in %v", listObj)
}
if items.Type().Kind() != reflect.Slice {
return nil, fmt.Errorf("unexpected Items field type: %v", items.Type().Kind())
}
result := make([]runtime.Object, items.Len())
for i := 0; i < items.Len(); i++ {
result[i] = items.Index(i).Addr().Interface().(runtime.Object)
}
return result, nil
}
func (t *Tester) testListFound(obj runtime.Object, assignFn AssignFunc) {
ctx := t.TestContext()
foo1 := obj.DeepCopyObject()
t.setObjectMeta(foo1, t.namer(1))
foo2 := obj.DeepCopyObject()
t.setObjectMeta(foo2, t.namer(2))
existing := assignFn([]runtime.Object{foo1, foo2})
listObj, err := t.storage.(rest.Lister).List(ctx, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != len(existing) {
t.Errorf("unexpected number of items: %v", len(items))
}
if !apiequality.Semantic.DeepEqual(existing, items) {
t.Errorf("expected: %#v, got: %#v", existing, items)
}
}
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
ctx := t.TestContext()
testLabels := map[string]string{"key": "value"}
foo3 := obj.DeepCopyObject()
t.setObjectMeta(foo3, "foo3")
foo4 := obj.DeepCopyObject()
foo4Meta := t.getObjectMetaOrFail(foo4)
foo4Meta.SetName("foo4")
foo4Meta.SetNamespace(genericapirequest.NamespaceValue(ctx))
foo4Meta.SetLabels(testLabels)
objs := ([]runtime.Object{foo3, foo4})
assignFn(objs)
filtered := []runtime.Object{objs[1]}
selector := labels.SelectorFromSet(labels.Set(testLabels))
options := &metainternalversion.ListOptions{LabelSelector: selector}
listObj, err := t.storage.(rest.Lister).List(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != len(filtered) {
t.Errorf("unexpected number of items: %v", len(items))
}
if !apiequality.Semantic.DeepEqual(filtered, items) {
t.Errorf("expected: %#v, got: %#v", filtered, items)
}
}
func (t *Tester) testListNotFound(assignFn AssignFunc) {
ctx := t.TestContext()
_ = assignFn([]runtime.Object{})
listObj, err := t.storage.(rest.Lister).List(ctx, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != 0 {
t.Errorf("unexpected items: %#v", items)
}
}
// testListTableConversion verifies a set of known bounds and expected limitations for the values
// returned from a TableList. These conditions may be changed if necessary with adequate review.
func (t *Tester) testListTableConversion(obj runtime.Object, assignFn AssignFunc) {
ctx := t.TestContext()
testLabels := map[string]string{"key": "value"}
foo3 := obj.DeepCopyObject()
t.setObjectMeta(foo3, "foo3")
foo4 := obj.DeepCopyObject()
foo4Meta := t.getObjectMetaOrFail(foo4)
foo4Meta.SetName("foo4")
foo4Meta.SetNamespace(genericapirequest.NamespaceValue(ctx))
foo4Meta.SetLabels(testLabels)
objs := ([]runtime.Object{foo3, foo4})
assignFn(objs)
options := &metainternalversion.ListOptions{}
listObj, err := t.storage.(rest.Lister).List(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != len(objs) {
t.Errorf("unexpected number of items: %v", len(items))
}
if !apiequality.Semantic.DeepEqual(objs, items) {
t.Errorf("expected: %#v, got: %#v", objs, items)
}
m, err := meta.ListAccessor(listObj)
if err != nil {
t.Fatalf("list should support ListMeta %T: %v", listObj, err)
}
m.SetContinue("continuetoken")
m.SetResourceVersion("11")
m.SetSelfLink("/list/link")
table, err := t.storage.(rest.TableConvertor).ConvertToTable(ctx, listObj, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if table.ResourceVersion != "11" || table.SelfLink != "/list/link" || table.Continue != "continuetoken" {
t.Errorf("printer lost list meta: %#v", table.ListMeta)
}
if len(table.Rows) != len(items) {
t.Errorf("unexpected number of rows: %v", len(table.Rows))
}
columns := table.ColumnDefinitions
if len(columns) == 0 {
t.Fatalf("unexpected number of columns: %v\n%#v", len(columns), columns)
}
if !strings.EqualFold(columns[0].Name, "Name") || columns[0].Type != "string" || columns[0].Format != "name" {
t.Errorf("expect column 0 to be the name column: %#v", columns[0])
}
for j, column := range columns {
if len(column.Name) == 0 {
t.Errorf("column %d has no name", j)
}
switch column.Type {
case "string", "date", "integer", "number", "boolean":
default:
t.Errorf("column %d has unexpected type: %q", j, column.Type)
}
switch {
case column.Format == "":
case column.Format == "name" && column.Type == "string":
default:
t.Errorf("column %d has unexpected format: %q with type %q", j, column.Format, column.Type)
}
if column.Priority < 0 || column.Priority > 2 {
t.Errorf("column %d has unexpected priority: %q", j, column.Priority)
}
if len(column.Description) == 0 {
t.Errorf("column %d has no description", j)
}
if column.Name == "Created At" && column.Type != "date" && column.Format != "" {
t.Errorf("column %d looks like a created at column, but has a different type and format: %#v", j, column)
}
}
for i, row := range table.Rows {
if len(row.Cells) != len(table.ColumnDefinitions) {
t.Errorf("row %d did not have the correct number of cells: %d in %v, expected %d", i, len(row.Cells), row.Cells, len(table.ColumnDefinitions))
}
for j, cell := range row.Cells {
// do not add to this test without discussion - may break clients
switch cell.(type) {
case float64, int64, int32, int, string, bool:
case []interface{}:
case nil:
default:
t.Errorf("row %d, cell %d has an unrecognized type, only JSON serialization safe types are allowed: %T ", i, j, cell)
}
}
if len(row.Cells) != len(table.ColumnDefinitions) {
t.Fatalf("unmatched row length on row %d: %#v", i, row.Cells)
}
}
data, _ := json.MarshalIndent(table, "", " ")
t.Logf("%s", string(data))
}
// =============================================================================
// Watching tests.
func (t *Tester) testWatchFields(obj runtime.Object, emitFn EmitFunc, fieldsPass, fieldsFail []fields.Set, actions []string) {
ctx := t.TestContext()
for _, field := range fieldsPass {
for _, action := range actions {
options := &metainternalversion.ListOptions{FieldSelector: field.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v, %v", err, action)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case _, ok := <-watcher.ResultChan():
if !ok {
t.Errorf("watch channel should be open")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
watcher.Stop()
}
}
for _, field := range fieldsFail {
for _, action := range actions {
options := &metainternalversion.ListOptions{FieldSelector: field.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case <-watcher.ResultChan():
t.Errorf("unexpected result from result channel")
case <-time.After(time.Millisecond * 500):
// expected case
}
watcher.Stop()
}
}
}
func (t *Tester) testWatchLabels(obj runtime.Object, emitFn EmitFunc, labelsPass, labelsFail []labels.Set, actions []string) {
ctx := t.TestContext()
for _, label := range labelsPass {
for _, action := range actions {
options := &metainternalversion.ListOptions{LabelSelector: label.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case _, ok := <-watcher.ResultChan():
if !ok {
t.Errorf("watch channel should be open")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
watcher.Stop()
}
}
for _, label := range labelsFail {
for _, action := range actions {
options := &metainternalversion.ListOptions{LabelSelector: label.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case <-watcher.ResultChan():
t.Errorf("unexpected result from result channel")
case <-time.After(time.Millisecond * 500):
// expected case
}
watcher.Stop()
}
}
}
| staging/src/k8s.io/apiserver/pkg/registry/rest/resttest/resttest.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00036184382042847574,
0.00016961366054601967,
0.00015807044110260904,
0.00016690602933522314,
0.0000179293128894642
] |
{
"id": 13,
"code_window": [
"\t// the current contents of the object when deciding how the update object should look.\n",
"\t// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false\n",
"\t// or zero value in 'ptrToType' parameter otherwise.\n",
"\t// If the object to update has the same value as previous, it won't do any update\n",
"\t// but will return the object in 'ptrToType' parameter.\n",
"\t// If 'suggestion' is non-nil, it can be used as a suggestion about the current version\n",
"\t// of the object to avoid read operation from storage to get it. However, the\n",
"\t// implementations have to retry in case suggestion is stale.\n",
"\t//\n",
"\t// Example:\n",
"\t//\n",
"\t// s := /* implementation of Interface */\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the\n",
"\t// current version of the object to avoid read operation from storage to get it.\n",
"\t// However, the implementations have to retry in case suggestion is stale.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 222
} | // +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package example2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestType) DeepCopyInto(out *TestType) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestType.
func (in *TestType) DeepCopy() *TestType {
if in == nil {
return nil
}
out := new(TestType)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TestType) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestTypeList) DeepCopyInto(out *TestTypeList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]TestType, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeList.
func (in *TestTypeList) DeepCopy() *TestTypeList {
if in == nil {
return nil
}
out := new(TestTypeList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TestTypeList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestTypeStatus) DeepCopyInto(out *TestTypeStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeStatus.
func (in *TestTypeStatus) DeepCopy() *TestTypeStatus {
if in == nil {
return nil
}
out := new(TestTypeStatus)
in.DeepCopyInto(out)
return out
}
| staging/src/k8s.io/code-generator/_examples/apiserver/apis/example2/zz_generated.deepcopy.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017860493971966207,
0.0001687382027739659,
0.0001624141150387004,
0.00016764762403909117,
0.000005899444204260362
] |
{
"id": 13,
"code_window": [
"\t// the current contents of the object when deciding how the update object should look.\n",
"\t// If the key doesn't exist, it will return NotFound storage error if ignoreNotFound=false\n",
"\t// or zero value in 'ptrToType' parameter otherwise.\n",
"\t// If the object to update has the same value as previous, it won't do any update\n",
"\t// but will return the object in 'ptrToType' parameter.\n",
"\t// If 'suggestion' is non-nil, it can be used as a suggestion about the current version\n",
"\t// of the object to avoid read operation from storage to get it. However, the\n",
"\t// implementations have to retry in case suggestion is stale.\n",
"\t//\n",
"\t// Example:\n",
"\t//\n",
"\t// s := /* implementation of Interface */\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t// If 'cachedExistingObject' is non-nil, it can be used as a suggestion about the\n",
"\t// current version of the object to avoid read operation from storage to get it.\n",
"\t// However, the implementations have to retry in case suggestion is stale.\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 222
} | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <[email protected]>
@author: Jorge Orpinel <[email protected]>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while True:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'vendor' in dirs:
dirs.remove('vendor')
if 'staging' in dirs:
dirs.remove('staging')
if '_output' in dirs:
dirs.remove('_output')
if '_gopath' in dirs:
dirs.remove('_gopath')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if '.make' in dirs:
dirs.remove('.make')
if 'BUILD' in files:
files.remove('BUILD')
for name in files:
pathname = os.path.join(root, name)
if not is_binary(pathname):
all_files.append(pathname)
return all_files
# Collects all the flags used in golang files and verifies the flags do
# not contain underscore. If any flag needs to be excluded from this check,
# need to add that flag in hack/verify-flags/excluded-flags.txt.
def check_underscore_in_flags(rootdir, files):
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
check_underscore_in_flags(rootdir, files)
if __name__ == "__main__":
sys.exit(main())
| hack/verify-flags-underscore.py | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017653510440140963,
0.00016908251564018428,
0.00016548464191146195,
0.00016803076141513884,
0.0000033434705528634368
] |
{
"id": 14,
"code_window": [
"\t// return cur, nil, nil\n",
"\t// },\n",
"\t// )\n",
"\tGuaranteedUpdate(\n",
"\t\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error\n",
"\n",
"\t// Count returns number of different entries under the key (generally being path prefix).\n",
"\tCount(key string) (int64, error)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, cachedExistingObject runtime.Object) error\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 246
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd3
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path"
"reflect"
"strings"
"time"
"go.etcd.io/etcd/clientv3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/features"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/value"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
utiltrace "k8s.io/utils/trace"
)
// authenticatedDataString satisfies the value.Context interface. It uses the key to
// authenticate the stored data. This does not defend against reuse of previously
// encrypted values under the same key, but will prevent an attacker from using an
// encrypted value from a different key. A stronger authenticated data segment would
// include the etcd3 Version field (which is incremented on each write to a key and
// reset when the key is deleted), but an attacker with write access to etcd can
// force deletion and recreation of keys to weaken that angle.
type authenticatedDataString string
// AuthenticatedData implements the value.Context interface.
func (d authenticatedDataString) AuthenticatedData() []byte {
return []byte(string(d))
}
var _ value.Context = authenticatedDataString("")
type store struct {
client *clientv3.Client
codec runtime.Codec
versioner storage.Versioner
transformer value.Transformer
pathPrefix string
watcher *watcher
pagingEnabled bool
leaseManager *leaseManager
}
type objState struct {
obj runtime.Object
meta *storage.ResponseMeta
rev int64
data []byte
stale bool
}
// New returns an etcd3 implementation of storage.Interface.
func New(c *clientv3.Client, codec runtime.Codec, newFunc func() runtime.Object, prefix string, transformer value.Transformer, pagingEnabled bool, leaseReuseDurationSeconds int64) storage.Interface {
return newStore(c, newFunc, pagingEnabled, leaseReuseDurationSeconds, codec, prefix, transformer)
}
func newStore(c *clientv3.Client, newFunc func() runtime.Object, pagingEnabled bool, leaseReuseDurationSeconds int64, codec runtime.Codec, prefix string, transformer value.Transformer) *store {
versioner := APIObjectVersioner{}
result := &store{
client: c,
codec: codec,
versioner: versioner,
transformer: transformer,
pagingEnabled: pagingEnabled,
// for compatibility with etcd2 impl.
// no-op for default prefix of '/registry'.
// keeps compatibility with etcd2 impl for custom prefixes that don't start with '/'
pathPrefix: path.Join("/", prefix),
watcher: newWatcher(c, codec, newFunc, versioner, transformer),
leaseManager: newDefaultLeaseManager(c, leaseReuseDurationSeconds),
}
return result
}
// Versioner implements storage.Interface.Versioner.
func (s *store) Versioner() storage.Versioner {
return s.versioner
}
// Get implements storage.Interface.Get.
func (s *store) Get(ctx context.Context, key string, opts storage.GetOptions, out runtime.Object) error {
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(opts.ResourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) == 0 {
if opts.IgnoreNotFound {
return runtime.SetZeroValue(out)
}
return storage.NewKeyNotFoundError(key, 0)
}
kv := getResp.Kvs[0]
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
return decode(s.codec, s.versioner, data, out, kv.ModRevision)
}
// Create implements storage.Interface.Create.
func (s *store) Create(ctx context.Context, key string, obj, out runtime.Object, ttl uint64) error {
if version, err := s.versioner.ObjectResourceVersion(obj); err == nil && version != 0 {
return errors.New("resourceVersion should not be set on objects to be created")
}
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
data, err := runtime.Encode(s.codec, obj)
if err != nil {
return err
}
key = path.Join(s.pathPrefix, key)
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
newData, err := s.transformer.TransformToStorage(data, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
notFound(key),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Commit()
metrics.RecordEtcdRequestLatency("create", getTypeName(obj), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
return storage.NewKeyExistsError(key, 0)
}
if out != nil {
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
return nil
}
// Delete implements storage.Interface.Delete.
func (s *store) Delete(
ctx context.Context, key string, out runtime.Object, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
return s.conditionalDelete(ctx, key, out, v, preconditions, validateDeletion, cachedExistingObject)
}
func (s *store) conditionalDelete(
ctx context.Context, key string, out runtime.Object, v reflect.Value, preconditions *storage.Preconditions,
validateDeletion storage.ValidateObjectFunc, cachedExistingObject runtime.Object) error {
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, false)
}
var origState *objState
var err error
var origStateIsCurrent bool
if cachedExistingObject != nil {
origState, err = s.getStateFromObject(cachedExistingObject)
} else {
origState, err = getCurrentState()
origStateIsCurrent = true
}
if err != nil {
return err
}
for {
if preconditions != nil {
if err := preconditions.Check(key, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
}
if err := validateDeletion(ctx, origState.obj); err != nil {
if origStateIsCurrent {
return err
}
// It's possible we're working with stale data.
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
origStateIsCurrent = true
// Retry
continue
}
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpDelete(key),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("delete", getTypeName(out), startTime)
if err != nil {
return err
}
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("deletion of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, false)
if err != nil {
return err
}
origStateIsCurrent = true
continue
}
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
// GuaranteedUpdate implements storage.Interface.GuaranteedUpdate.
func (s *store) GuaranteedUpdate(
ctx context.Context, key string, out runtime.Object, ignoreNotFound bool,
preconditions *storage.Preconditions, tryUpdate storage.UpdateFunc, suggestion runtime.Object) error {
trace := utiltrace.New("GuaranteedUpdate etcd3", utiltrace.Field{"type", getTypeName(out)})
defer trace.LogIfLong(500 * time.Millisecond)
v, err := conversion.EnforcePtr(out)
if err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
key = path.Join(s.pathPrefix, key)
getCurrentState := func() (*objState, error) {
startTime := time.Now()
getResp, err := s.client.KV.Get(ctx, key)
metrics.RecordEtcdRequestLatency("get", getTypeName(out), startTime)
if err != nil {
return nil, err
}
return s.getState(getResp, key, v, ignoreNotFound)
}
var origState *objState
var mustCheckData bool
if suggestion != nil {
origState, err = s.getStateFromObject(suggestion)
mustCheckData = true
} else {
origState, err = getCurrentState()
}
if err != nil {
return err
}
trace.Step("initial value restored")
transformContext := authenticatedDataString(key)
for {
if err := preconditions.Check(key, origState.obj); err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
ret, ttl, err := s.updateState(origState, tryUpdate)
if err != nil {
// If our data is already up to date, return the error
if !mustCheckData {
return err
}
// It's possible we were working with stale data
// Actually fetch
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
// Retry
continue
}
data, err := runtime.Encode(s.codec, ret)
if err != nil {
return err
}
if !origState.stale && bytes.Equal(data, origState.data) {
// if we skipped the original Get in this loop, we must refresh from
// etcd in order to be sure the data in the store is equivalent to
// our desired serialization
if mustCheckData {
origState, err = getCurrentState()
if err != nil {
return err
}
mustCheckData = false
if !bytes.Equal(data, origState.data) {
// original data changed, restart loop
continue
}
}
// recheck that the data from etcd is not stale before short-circuiting a write
if !origState.stale {
return decode(s.codec, s.versioner, origState.data, out, origState.rev)
}
}
newData, err := s.transformer.TransformToStorage(data, transformContext)
if err != nil {
return storage.NewInternalError(err.Error())
}
opts, err := s.ttlOpts(ctx, int64(ttl))
if err != nil {
return err
}
trace.Step("Transaction prepared")
startTime := time.Now()
txnResp, err := s.client.KV.Txn(ctx).If(
clientv3.Compare(clientv3.ModRevision(key), "=", origState.rev),
).Then(
clientv3.OpPut(key, string(newData), opts...),
).Else(
clientv3.OpGet(key),
).Commit()
metrics.RecordEtcdRequestLatency("update", getTypeName(out), startTime)
if err != nil {
return err
}
trace.Step("Transaction committed")
if !txnResp.Succeeded {
getResp := (*clientv3.GetResponse)(txnResp.Responses[0].GetResponseRange())
klog.V(4).Infof("GuaranteedUpdate of %s failed because of a conflict, going to retry", key)
origState, err = s.getState(getResp, key, v, ignoreNotFound)
if err != nil {
return err
}
trace.Step("Retry value restored")
mustCheckData = false
continue
}
putResp := txnResp.Responses[0].GetResponsePut()
return decode(s.codec, s.versioner, data, out, putResp.Header.Revision)
}
}
// GetToList implements storage.Interface.GetToList.
func (s *store) GetToList(ctx context.Context, key string, listOpts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := listOpts.ResourceVersion
match := listOpts.ResourceVersionMatch
pred := listOpts.Predicate
trace := utiltrace.New("GetToList etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
newItemFunc := getNewItemFunc(listObj, v)
key = path.Join(s.pathPrefix, key)
startTime := time.Now()
var opts []clientv3.OpOption
if len(resourceVersion) > 0 && match == metav1.ResourceVersionMatchExact {
rv, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
opts = append(opts, clientv3.WithRev(int64(rv)))
}
getResp, err := s.client.KV.Get(ctx, key, opts...)
metrics.RecordEtcdRequestLatency("get", getTypeName(listPtr), startTime)
if err != nil {
return err
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
if len(getResp.Kvs) > 0 {
data, _, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return storage.NewInternalError(err.Error())
}
if err := appendListItem(v, data, uint64(getResp.Kvs[0].ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// update version with cluster level revision
return s.versioner.UpdateList(listObj, uint64(getResp.Header.Revision), "", nil)
}
func getNewItemFunc(listObj runtime.Object, v reflect.Value) func() runtime.Object {
// For unstructured lists with a target group/version, preserve the group/version in the instantiated list items
if unstructuredList, isUnstructured := listObj.(*unstructured.UnstructuredList); isUnstructured {
if apiVersion := unstructuredList.GetAPIVersion(); len(apiVersion) > 0 {
return func() runtime.Object {
return &unstructured.Unstructured{Object: map[string]interface{}{"apiVersion": apiVersion}}
}
}
}
// Otherwise just instantiate an empty item
elem := v.Type().Elem()
return func() runtime.Object {
return reflect.New(elem).Interface().(runtime.Object)
}
}
func (s *store) Count(key string) (int64, error) {
key = path.Join(s.pathPrefix, key)
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
startTime := time.Now()
getResp, err := s.client.KV.Get(context.Background(), key, clientv3.WithRange(clientv3.GetPrefixRangeEnd(key)), clientv3.WithCountOnly())
metrics.RecordEtcdRequestLatency("listWithCount", key, startTime)
if err != nil {
return 0, err
}
return getResp.Count, nil
}
// continueToken is a simple structured object for encoding the state of a continue token.
// TODO: if we change the version of the encoded from, we can't start encoding the new version
// until all other servers are upgraded (i.e. we need to support rolling schema)
// This is a public API struct and cannot change.
type continueToken struct {
APIVersion string `json:"v"`
ResourceVersion int64 `json:"rv"`
StartKey string `json:"start"`
}
// parseFrom transforms an encoded predicate from into a versioned struct.
// TODO: return a typed error that instructs clients that they must relist
func decodeContinue(continueValue, keyPrefix string) (fromKey string, rv int64, err error) {
data, err := base64.RawURLEncoding.DecodeString(continueValue)
if err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
var c continueToken
if err := json.Unmarshal(data, &c); err != nil {
return "", 0, fmt.Errorf("continue key is not valid: %v", err)
}
switch c.APIVersion {
case "meta.k8s.io/v1":
if c.ResourceVersion == 0 {
return "", 0, fmt.Errorf("continue key is not valid: incorrect encoded start resourceVersion (version meta.k8s.io/v1)")
}
if len(c.StartKey) == 0 {
return "", 0, fmt.Errorf("continue key is not valid: encoded start key empty (version meta.k8s.io/v1)")
}
// defend against path traversal attacks by clients - path.Clean will ensure that startKey cannot
// be at a higher level of the hierarchy, and so when we append the key prefix we will end up with
// continue start key that is fully qualified and cannot range over anything less specific than
// keyPrefix.
key := c.StartKey
if !strings.HasPrefix(key, "/") {
key = "/" + key
}
cleaned := path.Clean(key)
if cleaned != key {
return "", 0, fmt.Errorf("continue key is not valid: %s", c.StartKey)
}
return keyPrefix + cleaned[1:], c.ResourceVersion, nil
default:
return "", 0, fmt.Errorf("continue key is not valid: server does not recognize this encoded version %q", c.APIVersion)
}
}
// encodeContinue returns a string representing the encoded continuation of the current query.
func encodeContinue(key, keyPrefix string, resourceVersion int64) (string, error) {
nextKey := strings.TrimPrefix(key, keyPrefix)
if nextKey == key {
return "", fmt.Errorf("unable to encode next field: the key and key prefix do not match")
}
out, err := json.Marshal(&continueToken{APIVersion: "meta.k8s.io/v1", ResourceVersion: resourceVersion, StartKey: nextKey})
if err != nil {
return "", err
}
return base64.RawURLEncoding.EncodeToString(out), nil
}
// List implements storage.Interface.List.
func (s *store) List(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
resourceVersion := opts.ResourceVersion
match := opts.ResourceVersionMatch
pred := opts.Predicate
trace := utiltrace.New("List etcd3",
utiltrace.Field{"key", key},
utiltrace.Field{"resourceVersion", resourceVersion},
utiltrace.Field{"resourceVersionMatch", match},
utiltrace.Field{"limit", pred.Limit},
utiltrace.Field{"continue", pred.Continue})
defer trace.LogIfLong(500 * time.Millisecond)
listPtr, err := meta.GetItemsPtr(listObj)
if err != nil {
return err
}
v, err := conversion.EnforcePtr(listPtr)
if err != nil || v.Kind() != reflect.Slice {
return fmt.Errorf("need ptr to slice: %v", err)
}
if s.pathPrefix != "" {
key = path.Join(s.pathPrefix, key)
}
// We need to make sure the key ended with "/" so that we only get children "directories".
// e.g. if we have key "/a", "/a/b", "/ab", getting keys with prefix "/a" will return all three,
// while with prefix "/a/" will return only "/a/b" which is the correct answer.
if !strings.HasSuffix(key, "/") {
key += "/"
}
keyPrefix := key
// set the appropriate clientv3 options to filter the returned data set
var paging bool
options := make([]clientv3.OpOption, 0, 4)
if s.pagingEnabled && pred.Limit > 0 {
paging = true
options = append(options, clientv3.WithLimit(pred.Limit))
}
newItemFunc := getNewItemFunc(listObj, v)
var fromRV *uint64
if len(resourceVersion) > 0 {
parsedRV, err := s.versioner.ParseResourceVersion(resourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
fromRV = &parsedRV
}
var returnedRV, continueRV, withRev int64
var continueKey string
switch {
case s.pagingEnabled && len(pred.Continue) > 0:
continueKey, continueRV, err = decodeContinue(pred.Continue, keyPrefix)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid continue token: %v", err))
}
if len(resourceVersion) > 0 && resourceVersion != "0" {
return apierrors.NewBadRequest("specifying resource version is not allowed when using continue")
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
key = continueKey
// If continueRV > 0, the LIST request needs a specific resource version.
// continueRV==0 is invalid.
// If continueRV < 0, the request is for the latest resource version.
if continueRV > 0 {
withRev = continueRV
returnedRV = continueRV
}
case s.pagingEnabled && pred.Limit > 0:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
if *fromRV > 0 {
returnedRV = int64(*fromRV)
withRev = returnedRV
}
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
rangeEnd := clientv3.GetPrefixRangeEnd(keyPrefix)
options = append(options, clientv3.WithRange(rangeEnd))
default:
if fromRV != nil {
switch match {
case metav1.ResourceVersionMatchNotOlderThan:
// The not older than constraint is checked after we get a response from etcd,
// and returnedRV is then set to the revision we get from the etcd response.
case metav1.ResourceVersionMatchExact:
returnedRV = int64(*fromRV)
withRev = returnedRV
case "": // legacy case
default:
return fmt.Errorf("unknown ResourceVersionMatch value: %v", match)
}
}
options = append(options, clientv3.WithPrefix())
}
if withRev != 0 {
options = append(options, clientv3.WithRev(withRev))
}
// loop until we have filled the requested limit from etcd or there are no more results
var lastKey []byte
var hasMore bool
var getResp *clientv3.GetResponse
for {
startTime := time.Now()
getResp, err = s.client.KV.Get(ctx, key, options...)
metrics.RecordEtcdRequestLatency("list", getTypeName(listPtr), startTime)
if err != nil {
return interpretListError(err, len(pred.Continue) > 0, continueKey, keyPrefix)
}
if err = s.validateMinimumResourceVersion(resourceVersion, uint64(getResp.Header.Revision)); err != nil {
return err
}
hasMore = getResp.More
if len(getResp.Kvs) == 0 && getResp.More {
return fmt.Errorf("no results were found, but etcd indicated there were more values remaining")
}
// avoid small allocations for the result slice, since this can be called in many
// different contexts and we don't know how significantly the result will be filtered
if pred.Empty() {
growSlice(v, len(getResp.Kvs))
} else {
growSlice(v, 2048, len(getResp.Kvs))
}
// take items from the response until the bucket is full, filtering as we go
for _, kv := range getResp.Kvs {
if paging && int64(v.Len()) >= pred.Limit {
hasMore = true
break
}
lastKey = kv.Key
data, _, err := s.transformer.TransformFromStorage(kv.Value, authenticatedDataString(kv.Key))
if err != nil {
return storage.NewInternalErrorf("unable to transform key %q: %v", kv.Key, err)
}
if err := appendListItem(v, data, uint64(kv.ModRevision), pred, s.codec, s.versioner, newItemFunc); err != nil {
return err
}
}
// indicate to the client which resource version was returned
if returnedRV == 0 {
returnedRV = getResp.Header.Revision
}
// no more results remain or we didn't request paging
if !hasMore || !paging {
break
}
// we're paging but we have filled our bucket
if int64(v.Len()) >= pred.Limit {
break
}
key = string(lastKey) + "\x00"
if withRev == 0 {
withRev = returnedRV
options = append(options, clientv3.WithRev(withRev))
}
}
// instruct the client to begin querying from immediately after the last key we returned
// we never return a key that the client wouldn't be allowed to see
if hasMore {
// we want to start immediately after the last key
next, err := encodeContinue(string(lastKey)+"\x00", keyPrefix, returnedRV)
if err != nil {
return err
}
var remainingItemCount *int64
// getResp.Count counts in objects that do not match the pred.
// Instead of returning inaccurate count for non-empty selectors, we return nil.
// Only set remainingItemCount if the predicate is empty.
if utilfeature.DefaultFeatureGate.Enabled(features.RemainingItemCount) {
if pred.Empty() {
c := int64(getResp.Count - pred.Limit)
remainingItemCount = &c
}
}
return s.versioner.UpdateList(listObj, uint64(returnedRV), next, remainingItemCount)
}
// no continuation
return s.versioner.UpdateList(listObj, uint64(returnedRV), "", nil)
}
// growSlice takes a slice value and grows its capacity up
// to the maximum of the passed sizes or maxCapacity, whichever
// is smaller. Above maxCapacity decisions about allocation are left
// to the Go runtime on append. This allows a caller to make an
// educated guess about the potential size of the total list while
// still avoiding overly aggressive initial allocation. If sizes
// is empty maxCapacity will be used as the size to grow.
func growSlice(v reflect.Value, maxCapacity int, sizes ...int) {
cap := v.Cap()
max := cap
for _, size := range sizes {
if size > max {
max = size
}
}
if len(sizes) == 0 || max > maxCapacity {
max = maxCapacity
}
if max <= cap {
return
}
if v.Len() > 0 {
extra := reflect.MakeSlice(v.Type(), 0, max)
reflect.Copy(extra, v)
v.Set(extra)
} else {
extra := reflect.MakeSlice(v.Type(), 0, max)
v.Set(extra)
}
}
// Watch implements storage.Interface.Watch.
func (s *store) Watch(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, false)
}
// WatchList implements storage.Interface.WatchList.
func (s *store) WatchList(ctx context.Context, key string, opts storage.ListOptions) (watch.Interface, error) {
return s.watch(ctx, key, opts, true)
}
func (s *store) watch(ctx context.Context, key string, opts storage.ListOptions, recursive bool) (watch.Interface, error) {
rev, err := s.versioner.ParseResourceVersion(opts.ResourceVersion)
if err != nil {
return nil, err
}
key = path.Join(s.pathPrefix, key)
return s.watcher.Watch(ctx, key, int64(rev), recursive, opts.ProgressNotify, opts.Predicate)
}
func (s *store) getState(getResp *clientv3.GetResponse, key string, v reflect.Value, ignoreNotFound bool) (*objState, error) {
state := &objState{
meta: &storage.ResponseMeta{},
}
if u, ok := v.Addr().Interface().(runtime.Unstructured); ok {
state.obj = u.NewEmptyInstance()
} else {
state.obj = reflect.New(v.Type()).Interface().(runtime.Object)
}
if len(getResp.Kvs) == 0 {
if !ignoreNotFound {
return nil, storage.NewKeyNotFoundError(key, 0)
}
if err := runtime.SetZeroValue(state.obj); err != nil {
return nil, err
}
} else {
data, stale, err := s.transformer.TransformFromStorage(getResp.Kvs[0].Value, authenticatedDataString(key))
if err != nil {
return nil, storage.NewInternalError(err.Error())
}
state.rev = getResp.Kvs[0].ModRevision
state.meta.ResourceVersion = uint64(state.rev)
state.data = data
state.stale = stale
if err := decode(s.codec, s.versioner, state.data, state.obj, state.rev); err != nil {
return nil, err
}
}
return state, nil
}
func (s *store) getStateFromObject(obj runtime.Object) (*objState, error) {
state := &objState{
obj: obj,
meta: &storage.ResponseMeta{},
}
rv, err := s.versioner.ObjectResourceVersion(obj)
if err != nil {
return nil, fmt.Errorf("couldn't get resource version: %v", err)
}
state.rev = int64(rv)
state.meta.ResourceVersion = uint64(state.rev)
// Compute the serialized form - for that we need to temporarily clean
// its resource version field (those are not stored in etcd).
if err := s.versioner.PrepareObjectForStorage(obj); err != nil {
return nil, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
state.data, err = runtime.Encode(s.codec, obj)
if err != nil {
return nil, err
}
if err := s.versioner.UpdateObject(state.obj, uint64(rv)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return state, nil
}
func (s *store) updateState(st *objState, userUpdate storage.UpdateFunc) (runtime.Object, uint64, error) {
ret, ttlPtr, err := userUpdate(st.obj, *st.meta)
if err != nil {
return nil, 0, err
}
if err := s.versioner.PrepareObjectForStorage(ret); err != nil {
return nil, 0, fmt.Errorf("PrepareObjectForStorage failed: %v", err)
}
var ttl uint64
if ttlPtr != nil {
ttl = *ttlPtr
}
return ret, ttl, nil
}
// ttlOpts returns client options based on given ttl.
// ttl: if ttl is non-zero, it will attach the key to a lease with ttl of roughly the same length
func (s *store) ttlOpts(ctx context.Context, ttl int64) ([]clientv3.OpOption, error) {
if ttl == 0 {
return nil, nil
}
id, err := s.leaseManager.GetLease(ctx, ttl)
if err != nil {
return nil, err
}
return []clientv3.OpOption{clientv3.WithLease(id)}, nil
}
// validateMinimumResourceVersion returns a 'too large resource' version error when the provided minimumResourceVersion is
// greater than the most recent actualRevision available from storage.
func (s *store) validateMinimumResourceVersion(minimumResourceVersion string, actualRevision uint64) error {
if minimumResourceVersion == "" {
return nil
}
minimumRV, err := s.versioner.ParseResourceVersion(minimumResourceVersion)
if err != nil {
return apierrors.NewBadRequest(fmt.Sprintf("invalid resource version: %v", err))
}
// Enforce the storage.Interface guarantee that the resource version of the returned data
// "will be at least 'resourceVersion'".
if minimumRV > actualRevision {
return storage.NewTooLargeResourceVersionError(minimumRV, actualRevision, 0)
}
return nil
}
// decode decodes value of bytes into object. It will also set the object resource version to rev.
// On success, objPtr would be set to the object.
func decode(codec runtime.Codec, versioner storage.Versioner, value []byte, objPtr runtime.Object, rev int64) error {
if _, err := conversion.EnforcePtr(objPtr); err != nil {
return fmt.Errorf("unable to convert output object to pointer: %v", err)
}
_, _, err := codec.Decode(value, nil, objPtr)
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(objPtr, uint64(rev)); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
return nil
}
// appendListItem decodes and appends the object (if it passes filter) to v, which must be a slice.
func appendListItem(v reflect.Value, data []byte, rev uint64, pred storage.SelectionPredicate, codec runtime.Codec, versioner storage.Versioner, newItemFunc func() runtime.Object) error {
obj, _, err := codec.Decode(data, nil, newItemFunc())
if err != nil {
return err
}
// being unable to set the version does not prevent the object from being extracted
if err := versioner.UpdateObject(obj, rev); err != nil {
klog.Errorf("failed to update object version: %v", err)
}
if matched, err := pred.Matches(obj); err == nil && matched {
v.Set(reflect.Append(v, reflect.ValueOf(obj).Elem()))
}
return nil
}
func notFound(key string) clientv3.Cmp {
return clientv3.Compare(clientv3.ModRevision(key), "=", 0)
}
// getTypeName returns type name of an object for reporting purposes.
func getTypeName(obj interface{}) string {
return reflect.TypeOf(obj).String()
}
| staging/src/k8s.io/apiserver/pkg/storage/etcd3/store.go | 1 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.96177738904953,
0.08122016489505768,
0.0001673928345553577,
0.0005679408786818385,
0.23067359626293182
] |
{
"id": 14,
"code_window": [
"\t// return cur, nil, nil\n",
"\t// },\n",
"\t// )\n",
"\tGuaranteedUpdate(\n",
"\t\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error\n",
"\n",
"\t// Count returns number of different entries under the key (generally being path prefix).\n",
"\tCount(key string) (int64, error)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, cachedExistingObject runtime.Object) error\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 246
} | package longpath
import (
"path/filepath"
"strings"
)
// LongAbs makes a path absolute and returns it in NT long path form.
func LongAbs(path string) (string, error) {
if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) {
return path, nil
}
if !filepath.IsAbs(path) {
absPath, err := filepath.Abs(path)
if err != nil {
return "", err
}
path = absPath
}
if strings.HasPrefix(path, `\\`) {
return `\\?\UNC\` + path[2:], nil
}
return `\\?\` + path, nil
}
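// A minimal usage sketch (the paths below are assumptions, not taken from
// this package):
//
//	p, _ := LongAbs(`C:\ProgramData\docker`) // `\\?\C:\ProgramData\docker`
//	u, _ := LongAbs(`\\server\share\dir`)    // `\\?\UNC\server\share\dir`
//
// Paths already prefixed with `\\?\` or `\\.\` are returned unchanged.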
| vendor/github.com/Microsoft/hcsshim/internal/longpath/longpath.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.0002907739544752985,
0.00021280946384649724,
0.0001711193035589531,
0.00017653510440140963,
0.00005517354657058604
] |
{
"id": 14,
"code_window": [
"\t// return cur, nil, nil\n",
"\t// },\n",
"\t// )\n",
"\tGuaranteedUpdate(\n",
"\t\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error\n",
"\n",
"\t// Count returns number of different entries under the key (generally being path prefix).\n",
"\tCount(key string) (int64, error)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, cachedExistingObject runtime.Object) error\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 246
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = ["storage_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/registry/registrytest:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic/testing:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/etcd3/testing:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["storage.go"],
importpath = "k8s.io/kubernetes/pkg/registry/core/event/storage",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/printers:go_default_library",
"//pkg/printers/internalversion:go_default_library",
"//pkg/printers/storage:go_default_library",
"//pkg/registry/core/event:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/generic/registry:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/registry/rest:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| pkg/registry/core/event/storage/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.00017391702567692846,
0.00017195510736200958,
0.00016717576363589615,
0.00017253472469747066,
0.0000022025315047358163
] |
{
"id": 14,
"code_window": [
"\t// return cur, nil, nil\n",
"\t// },\n",
"\t// )\n",
"\tGuaranteedUpdate(\n",
"\t\tctx context.Context, key string, ptrToType runtime.Object, ignoreNotFound bool,\n",
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, suggestion runtime.Object) error\n",
"\n",
"\t// Count returns number of different entries under the key (generally being path prefix).\n",
"\tCount(key string) (int64, error)\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tprecondtions *Preconditions, tryUpdate UpdateFunc, cachedExistingObject runtime.Object) error\n"
],
"file_path": "staging/src/k8s.io/apiserver/pkg/storage/interfaces.go",
"type": "replace",
"edit_start_line_idx": 246
} | package govalidator
import (
"errors"
"fmt"
"html"
"math"
"path"
"regexp"
"strings"
"unicode"
"unicode/utf8"
)
// Contains check if the string contains the substring.
func Contains(str, substring string) bool {
return strings.Contains(str, substring)
}
// Matches check if string matches the pattern (pattern is regular expression)
// In case of error return false
func Matches(str, pattern string) bool {
match, _ := regexp.MatchString(pattern, str)
return match
}
// LeftTrim trims characters from the left side of the input.
// If the second argument is empty, leading whitespace is removed.
func LeftTrim(str, chars string) string {
if chars == "" {
return strings.TrimLeftFunc(str, unicode.IsSpace)
}
r, _ := regexp.Compile("^[" + chars + "]+")
return r.ReplaceAllString(str, "")
}
// RightTrim trims characters from the right side of the input.
// If the second argument is empty, trailing whitespace is removed.
func RightTrim(str, chars string) string {
if chars == "" {
return strings.TrimRightFunc(str, unicode.IsSpace)
}
r, _ := regexp.Compile("[" + chars + "]+$")
return r.ReplaceAllString(str, "")
}
// Trim trims characters from both sides of the input.
// If the second argument is empty, surrounding whitespace is removed.
func Trim(str, chars string) string {
return LeftTrim(RightTrim(str, chars), chars)
}
// WhiteList remove characters that do not appear in the whitelist.
func WhiteList(str, chars string) string {
pattern := "[^" + chars + "]+"
r, _ := regexp.Compile(pattern)
return r.ReplaceAllString(str, "")
}
// BlackList remove characters that appear in the blacklist.
func BlackList(str, chars string) string {
pattern := "[" + chars + "]+"
r, _ := regexp.Compile(pattern)
return r.ReplaceAllString(str, "")
}
// StripLow remove characters with a numerical value < 32 and 127, mostly control characters.
// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD).
func StripLow(str string, keepNewLines bool) string {
chars := ""
if keepNewLines {
chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F"
} else {
chars = "\x00-\x1F\x7F"
}
return BlackList(str, chars)
}
// ReplacePattern replace regular expression pattern in string
func ReplacePattern(str, pattern, replace string) string {
r, _ := regexp.Compile(pattern)
return r.ReplaceAllString(str, replace)
}
// Escape replace <, >, & and " with HTML entities.
var Escape = html.EscapeString
func addSegment(inrune, segment []rune) []rune {
if len(segment) == 0 {
return inrune
}
if len(inrune) != 0 {
inrune = append(inrune, '_')
}
inrune = append(inrune, segment...)
return inrune
}
// UnderscoreToCamelCase converts from underscore separated form to camel case form.
// Ex.: my_func => MyFunc
func UnderscoreToCamelCase(s string) string {
return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1)
}
// CamelCaseToUnderscore converts from camel case form to underscore separated form.
// Ex.: MyFunc => my_func
func CamelCaseToUnderscore(str string) string {
var output []rune
var segment []rune
for _, r := range str {
// not treat number as separate segment
if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) {
output = addSegment(output, segment)
segment = nil
}
segment = append(segment, unicode.ToLower(r))
}
output = addSegment(output, segment)
return string(output)
}
// Reverse return reversed string
func Reverse(s string) string {
r := []rune(s)
for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
r[i], r[j] = r[j], r[i]
}
return string(r)
}
// GetLines split string by "\n" and return array of lines
func GetLines(s string) []string {
return strings.Split(s, "\n")
}
// GetLine return specified line of multiline string
func GetLine(s string, index int) (string, error) {
lines := GetLines(s)
if index < 0 || index >= len(lines) {
return "", errors.New("line index out of bounds")
}
return lines[index], nil
}
// RemoveTags remove all tags from HTML string
func RemoveTags(s string) string {
return ReplacePattern(s, "<[^>]*>", "")
}
// SafeFileName return safe string that can be used in file names
func SafeFileName(str string) string {
name := strings.ToLower(str)
name = path.Clean(path.Base(name))
name = strings.Trim(name, " ")
separators, err := regexp.Compile(`[ &_=+:]`)
if err == nil {
name = separators.ReplaceAllString(name, "-")
}
legal, err := regexp.Compile(`[^[:alnum:]-.]`)
if err == nil {
name = legal.ReplaceAllString(name, "")
}
for strings.Contains(name, "--") {
name = strings.Replace(name, "--", "-", -1)
}
return name
}
// NormalizeEmail canonicalize an email address.
// The local part of the email address is lowercased for all domains; the hostname is always lowercased and
// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail).
// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and
// are stripped of tags (e.g. [email protected] becomes [email protected]) and all @googlemail.com addresses are
// normalized to @gmail.com.
func NormalizeEmail(str string) (string, error) {
if !IsEmail(str) {
return "", fmt.Errorf("%s is not an email", str)
}
parts := strings.Split(str, "@")
parts[0] = strings.ToLower(parts[0])
parts[1] = strings.ToLower(parts[1])
if parts[1] == "gmail.com" || parts[1] == "googlemail.com" {
parts[1] = "gmail.com"
parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0]
}
return strings.Join(parts, "@"), nil
}
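// A short sketch of the rules above (the address is an assumption, not taken
// from this package's documentation):
//
//	out, _ := NormalizeEmail("[email protected]")
//	// out == "[email protected]": lowercased, dots removed, the "+News"
//	// tag stripped, and googlemail.com folded into gmail.com.
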
// Truncate a string to the closest length without breaking words.
func Truncate(str string, length int, ending string) string {
var aftstr, befstr string
if len(str) > length {
words := strings.Fields(str)
before, present := 0, 0
for i := range words {
befstr = aftstr
before = present
aftstr = aftstr + words[i] + " "
present = len(aftstr)
if present > length && i != 0 {
if (length - before) < (present - length) {
return Trim(befstr, " /\\.,\"'#!?&@+-") + ending
}
return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending
}
}
}
return str
}
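// A worked example under assumed inputs: the cut lands on the word boundary
// closest to the requested length, so
//
//	Truncate("The quick brown fox jumps", 12, "...") // "The quick..."
//
// because cutting after "quick" (10 chars) is nearer to 12 than cutting after
// "brown" (16 chars).
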
// PadLeft pads the left side of the string if its size is less than the indicated pad length
func PadLeft(str string, padStr string, padLen int) string {
return buildPadStr(str, padStr, padLen, true, false)
}
// PadRight pads the right side of the string if its size is less than the indicated pad length
func PadRight(str string, padStr string, padLen int) string {
return buildPadStr(str, padStr, padLen, false, true)
}
// PadBoth pads both sides of the string if its size is less than the indicated pad length
func PadBoth(str string, padStr string, padLen int) string {
return buildPadStr(str, padStr, padLen, true, true)
}
// buildPadStr pads the string on the left, right or both sides; note that the
// padding string can be unicode and more than one character
func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string {
	// When the padded length is less than the current string size
if padLen < utf8.RuneCountInString(str) {
return str
}
padLen -= utf8.RuneCountInString(str)
targetLen := padLen
targetLenLeft := targetLen
targetLenRight := targetLen
if padLeft && padRight {
targetLenLeft = padLen / 2
targetLenRight = padLen - targetLenLeft
}
strToRepeatLen := utf8.RuneCountInString(padStr)
repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen)))
repeatedString := strings.Repeat(padStr, repeatTimes)
leftSide := ""
if padLeft {
leftSide = repeatedString[0:targetLenLeft]
}
rightSide := ""
if padRight {
rightSide = repeatedString[0:targetLenRight]
}
return leftSide + str + rightSide
}
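// A quick sketch of the padding helpers built on buildPadStr (inputs are
// assumptions):
//
//	PadLeft("abc", "*", 5)  // "**abc"
//	PadRight("abc", "*", 5) // "abc**"
//	PadBoth("abc", "*", 7)  // "**abc**"
//
// If padLen does not exceed the rune count of str, the string is returned
// unchanged.
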
// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object
func TruncatingErrorf(str string, args ...interface{}) error {
n := strings.Count(str, "%s")
return fmt.Errorf(str, args[:n]...)
}
| vendor/github.com/asaskevich/govalidator/utils.go | 0 | https://github.com/kubernetes/kubernetes/commit/d0726e4b1354b1c8c3978b96ab7b01d13a2b6340 | [
0.9963867664337158,
0.03878387063741684,
0.00016222530393861234,
0.00024894802481867373,
0.18461838364601135
] |
{
"id": 0,
"code_window": [
"\t\"k8s.io/component-base/logs\"\n",
"\t\"k8s.io/component-base/metrics\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
"\tkubecontrollerconfig \"k8s.io/kubernetes/cmd/kube-controller-manager/app/config\"\n",
"\tkubectrlmgrconfig \"k8s.io/kubernetes/pkg/controller/apis/config\"\n",
"\tcsrsigningconfig \"k8s.io/kubernetes/pkg/controller/certificates/signer/config\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 37
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/config/options"
cmconfig "k8s.io/controller-manager/config"
migration "k8s.io/controller-manager/pkg/leadermigration/options"
)
// GenericControllerManagerConfigurationOptions holds the options which are generic.
type GenericControllerManagerConfigurationOptions struct {
*cmconfig.GenericControllerManagerConfiguration
Debugging *DebuggingOptions
// LeaderMigration is the options for leader migration, a nil indicates default options should be applied.
LeaderMigration *migration.LeaderMigrationOptions
}
// NewGenericControllerManagerConfigurationOptions returns generic configuration default values for both
// the kube-controller-manager and the cloud-controller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewGenericControllerManagerConfigurationOptions(cfg *cmconfig.GenericControllerManagerConfiguration) *GenericControllerManagerConfigurationOptions {
o := &GenericControllerManagerConfigurationOptions{
GenericControllerManagerConfiguration: cfg,
Debugging: RecommendedDebuggingOptions(),
LeaderMigration: nil,
}
return o
}
// AddFlags adds flags related to generic for controller manager to the specified FlagSet.
func (o *GenericControllerManagerConfigurationOptions) AddFlags(fss *cliflag.NamedFlagSets, allControllers, disabledByDefaultControllers []string) {
if o == nil {
return
}
o.Debugging.AddFlags(fss.FlagSet("debugging"))
o.LeaderMigration.AddFlags(fss.FlagSet("leader-migration"))
genericfs := fss.FlagSet("generic")
genericfs.DurationVar(&o.MinResyncPeriod.Duration, "min-resync-period", o.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
genericfs.StringVar(&o.ClientConnection.ContentType, "kube-api-content-type", o.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
genericfs.Float32Var(&o.ClientConnection.QPS, "kube-api-qps", o.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver.")
genericfs.Int32Var(&o.ClientConnection.Burst, "kube-api-burst", o.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver.")
genericfs.DurationVar(&o.ControllerStartInterval.Duration, "controller-start-interval", o.ControllerStartInterval.Duration, "Interval between starting controller managers.")
genericfs.StringSliceVar(&o.Controllers, "controllers", o.Controllers, fmt.Sprintf(""+
"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
options.BindLeaderElectionFlags(&o.LeaderElection, genericfs)
}
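// A short illustration of the syntax described in the --controllers help text
// above: `--controllers=*,-foo` enables every on-by-default controller except
// the one named "foo", while `--controllers=foo` enables only "foo".
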
// ApplyTo fills up generic config with options.
func (o *GenericControllerManagerConfigurationOptions) ApplyTo(cfg *cmconfig.GenericControllerManagerConfiguration) error {
if o == nil {
return nil
}
if err := o.Debugging.ApplyTo(&cfg.Debugging); err != nil {
return err
}
if err := o.LeaderMigration.ApplyTo(cfg); err != nil {
return err
}
cfg.Port = o.Port
cfg.Address = o.Address
cfg.MinResyncPeriod = o.MinResyncPeriod
cfg.ClientConnection = o.ClientConnection
cfg.ControllerStartInterval = o.ControllerStartInterval
cfg.LeaderElection = o.LeaderElection
cfg.Controllers = o.Controllers
return nil
}
// Validate checks validation of GenericOptions.
func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers []string, disabledByDefaultControllers []string) []error {
if o == nil {
return nil
}
errs := []error{}
errs = append(errs, o.Debugging.Validate()...)
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.Controllers {
if controller == "*" {
continue
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
return errs
}
| staging/src/k8s.io/controller-manager/options/generic.go | 1 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.014305219054222107,
0.0029892639722675085,
0.0001650162012083456,
0.00020967898308299482,
0.004812292288988829
] |
{
"id": 0,
"code_window": [
"\t\"k8s.io/component-base/logs\"\n",
"\t\"k8s.io/component-base/metrics\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
"\tkubecontrollerconfig \"k8s.io/kubernetes/cmd/kube-controller-manager/app/config\"\n",
"\tkubectrlmgrconfig \"k8s.io/kubernetes/pkg/controller/apis/config\"\n",
"\tcsrsigningconfig \"k8s.io/kubernetes/pkg/controller/certificates/signer/config\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 37
} | package netlink
import (
"fmt"
"math"
)
const (
HANDLE_NONE = 0
HANDLE_INGRESS = 0xFFFFFFF1
HANDLE_CLSACT = HANDLE_INGRESS
HANDLE_ROOT = 0xFFFFFFFF
PRIORITY_MAP_LEN = 16
)
const (
HANDLE_MIN_INGRESS = 0xFFFFFFF2
HANDLE_MIN_EGRESS = 0xFFFFFFF3
)
type Qdisc interface {
Attrs() *QdiscAttrs
Type() string
}
// QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link,
// has a handle, a parent and a refcnt. The root qdisc of a device should
// have parent == HANDLE_ROOT.
type QdiscAttrs struct {
LinkIndex int
Handle uint32
Parent uint32
Refcnt uint32 // read only
}
func (q QdiscAttrs) String() string {
return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt)
}
func MakeHandle(major, minor uint16) uint32 {
return (uint32(major) << 16) | uint32(minor)
}
func MajorMinor(handle uint32) (uint16, uint16) {
return uint16((handle & 0xFFFF0000) >> 16), uint16(handle & 0x0000FFFFF)
}
func HandleStr(handle uint32) string {
switch handle {
case HANDLE_NONE:
return "none"
case HANDLE_INGRESS:
return "ingress"
case HANDLE_ROOT:
return "root"
default:
major, minor := MajorMinor(handle)
return fmt.Sprintf("%x:%x", major, minor)
}
}
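// A small sketch of the handle helpers (values are assumptions): a handle
// packs a 16-bit major and minor into one uint32, so
//
//	h := MakeHandle(1, 10)    // 0x0001000a
//	maj, min := MajorMinor(h) // 1, 10
//	HandleStr(h)              // "1:a" (major:minor in hex)
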
func Percentage2u32(percentage float32) uint32 {
// FIXME this is most likely not the best way to convert from % to uint32
if percentage == 100 {
return math.MaxUint32
}
return uint32(math.MaxUint32 * (percentage / 100))
}
// PfifoFast is the default qdisc created by the kernel if one has not
// been defined for the interface
type PfifoFast struct {
QdiscAttrs
Bands uint8
PriorityMap [PRIORITY_MAP_LEN]uint8
}
func (qdisc *PfifoFast) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *PfifoFast) Type() string {
return "pfifo_fast"
}
// Prio is a basic qdisc that works just like PfifoFast
type Prio struct {
QdiscAttrs
Bands uint8
PriorityMap [PRIORITY_MAP_LEN]uint8
}
func NewPrio(attrs QdiscAttrs) *Prio {
return &Prio{
QdiscAttrs: attrs,
Bands: 3,
PriorityMap: [PRIORITY_MAP_LEN]uint8{1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1},
}
}
func (qdisc *Prio) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *Prio) Type() string {
return "prio"
}
// Htb is a classful qdisc that rate limits based on tokens
type Htb struct {
QdiscAttrs
Version uint32
Rate2Quantum uint32
Defcls uint32
Debug uint32
DirectPkts uint32
}
func NewHtb(attrs QdiscAttrs) *Htb {
return &Htb{
QdiscAttrs: attrs,
Version: 3,
Defcls: 0,
Rate2Quantum: 10,
Debug: 0,
DirectPkts: 0,
}
}
func (qdisc *Htb) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *Htb) Type() string {
return "htb"
}
// NetemQdiscAttrs holds parameters for netem, a classless qdisc that emulates network conditions such as delay, packet loss, duplication, reordering and corruption
type NetemQdiscAttrs struct {
Latency uint32 // in us
DelayCorr float32 // in %
Limit uint32
Loss float32 // in %
LossCorr float32 // in %
Gap uint32
Duplicate float32 // in %
DuplicateCorr float32 // in %
Jitter uint32 // in us
ReorderProb float32 // in %
ReorderCorr float32 // in %
CorruptProb float32 // in %
CorruptCorr float32 // in %
}
func (q NetemQdiscAttrs) String() string {
return fmt.Sprintf(
"{Latency: %d, Limit: %d, Loss: %f, Gap: %d, Duplicate: %f, Jitter: %d}",
q.Latency, q.Limit, q.Loss, q.Gap, q.Duplicate, q.Jitter,
)
}
type Netem struct {
QdiscAttrs
Latency uint32
DelayCorr uint32
Limit uint32
Loss uint32
LossCorr uint32
Gap uint32
Duplicate uint32
DuplicateCorr uint32
Jitter uint32
ReorderProb uint32
ReorderCorr uint32
CorruptProb uint32
CorruptCorr uint32
}
func (netem *Netem) String() string {
return fmt.Sprintf(
"{Latency: %v, Limit: %v, Loss: %v, Gap: %v, Duplicate: %v, Jitter: %v}",
netem.Latency, netem.Limit, netem.Loss, netem.Gap, netem.Duplicate, netem.Jitter,
)
}
func (qdisc *Netem) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *Netem) Type() string {
return "netem"
}
// Tbf is a classless qdisc that rate limits based on tokens
type Tbf struct {
QdiscAttrs
Rate uint64
Limit uint32
Buffer uint32
Peakrate uint64
Minburst uint32
// TODO: handle other settings
}
func (qdisc *Tbf) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *Tbf) Type() string {
return "tbf"
}
// Ingress is a qdisc for adding ingress filters
type Ingress struct {
QdiscAttrs
}
func (qdisc *Ingress) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *Ingress) Type() string {
return "ingress"
}
// GenericQdisc qdiscs represent types that are not currently understood
// by this netlink library.
type GenericQdisc struct {
QdiscAttrs
QdiscType string
}
func (qdisc *GenericQdisc) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *GenericQdisc) Type() string {
return qdisc.QdiscType
}
type Hfsc struct {
QdiscAttrs
Defcls uint16
}
func NewHfsc(attrs QdiscAttrs) *Hfsc {
return &Hfsc{
QdiscAttrs: attrs,
Defcls: 1,
}
}
func (hfsc *Hfsc) Attrs() *QdiscAttrs {
return &hfsc.QdiscAttrs
}
func (hfsc *Hfsc) Type() string {
return "hfsc"
}
func (hfsc *Hfsc) String() string {
return fmt.Sprintf(
"{%v -- default: %d}",
hfsc.Attrs(), hfsc.Defcls,
)
}
// Fq is a classless packet scheduler meant to be mostly used for locally generated traffic.
type Fq struct {
QdiscAttrs
PacketLimit uint32
FlowPacketLimit uint32
// In bytes
Quantum uint32
InitialQuantum uint32
// called RateEnable under the hood
Pacing uint32
FlowDefaultRate uint32
FlowMaxRate uint32
// called BucketsLog under the hood
Buckets uint32
FlowRefillDelay uint32
LowRateThreshold uint32
}
func (fq *Fq) String() string {
return fmt.Sprintf(
"{PacketLimit: %v, FlowPacketLimit: %v, Quantum: %v, InitialQuantum: %v, Pacing: %v, FlowDefaultRate: %v, FlowMaxRate: %v, Buckets: %v, FlowRefillDelay: %v, LowRateThreshold: %v}",
fq.PacketLimit, fq.FlowPacketLimit, fq.Quantum, fq.InitialQuantum, fq.Pacing, fq.FlowDefaultRate, fq.FlowMaxRate, fq.Buckets, fq.FlowRefillDelay, fq.LowRateThreshold,
)
}
func NewFq(attrs QdiscAttrs) *Fq {
return &Fq{
QdiscAttrs: attrs,
Pacing: 1,
}
}
func (qdisc *Fq) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *Fq) Type() string {
return "fq"
}
// FQ_Codel (Fair Queuing Controlled Delay) is queuing discipline that combines Fair Queuing with the CoDel AQM scheme.
type FqCodel struct {
QdiscAttrs
Target uint32
Limit uint32
Interval uint32
ECN uint32
Flows uint32
Quantum uint32
// There are some more attributes here, but support for them seems not ubiquitous
}
func (fqcodel *FqCodel) String() string {
return fmt.Sprintf(
"{%v -- Target: %v, Limit: %v, Interval: %v, ECM: %v, Flows: %v, Quantum: %v}",
fqcodel.Attrs(), fqcodel.Target, fqcodel.Limit, fqcodel.Interval, fqcodel.ECN, fqcodel.Flows, fqcodel.Quantum,
)
}
func NewFqCodel(attrs QdiscAttrs) *FqCodel {
return &FqCodel{
QdiscAttrs: attrs,
ECN: 1,
}
}
func (qdisc *FqCodel) Attrs() *QdiscAttrs {
return &qdisc.QdiscAttrs
}
func (qdisc *FqCodel) Type() string {
return "fq_codel"
}
| vendor/github.com/vishvananda/netlink/qdisc.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0003574116271920502,
0.00017386589024681598,
0.0001603970886208117,
0.00016631813195999712,
0.00003344070864841342
] |
{
"id": 0,
"code_window": [
"\t\"k8s.io/component-base/logs\"\n",
"\t\"k8s.io/component-base/metrics\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
"\tkubecontrollerconfig \"k8s.io/kubernetes/cmd/kube-controller-manager/app/config\"\n",
"\tkubectrlmgrconfig \"k8s.io/kubernetes/pkg/controller/apis/config\"\n",
"\tcsrsigningconfig \"k8s.io/kubernetes/pkg/controller/certificates/signer/config\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 37
} | {
"kind": "PodDisruptionBudget",
"apiVersion": "policy/v1",
"metadata": {
"name": "2",
"generateName": "3",
"namespace": "4",
"selfLink": "5",
"uid": "7",
"resourceVersion": "11042405498087606203",
"generation": 8071137005907523419,
"creationTimestamp": null,
"deletionGracePeriodSeconds": -4955867275792137171,
"labels": {
"7": "8"
},
"annotations": {
"9": "10"
},
"ownerReferences": [
{
"apiVersion": "11",
"kind": "12",
"name": "13",
"uid": "Dz廔ȇ{sŊƏp",
"controller": false,
"blockOwnerDeletion": true
}
],
"finalizers": [
"14"
],
"clusterName": "15",
"managedFields": [
{
"manager": "16",
"operation": "鐊唊飙Ş-U圴÷a/ɔ}摁(湗Ć]",
"apiVersion": "17",
"fieldsType": "18"
}
]
},
"spec": {
"minAvailable": 2,
"selector": {
"matchLabels": {
"8---jop9641lg.p-g8c2-k-912e5-c-e63-n-3n/E9.8ThjT9s-j41-0-6p-JFHn7y-74.-0MUORQQ.N2.3": "68._bQw.-dG6c-.6--_x.--0wmZk1_8._3s_-_Bq.m_4"
},
"matchExpressions": [
{
"key": "p503---477-49p---o61---4fy--9---7--9-9s-0-u5lj2--10pq-0-7-9-2-0/fP81.-.9Vdx.TB_M-H_5_.t..bG0",
"operator": "In",
"values": [
"D07.a_.y_y_o0_5qN2_---_M.N_._a6.9bHjdH.-.5_.I8__n"
]
}
]
},
"maxUnavailable": 3
},
"status": {
"observedGeneration": -6582200896939805980,
"disruptedPods": {
"25": "2250-04-18T21:52:38Z"
},
"disruptionsAllowed": -1942073618,
"currentHealthy": -2037845840,
"desiredHealthy": -1965578645,
"expectedPods": -347579237,
"conditions": [
{
"type": "26",
"status": "蓿彭聡A3fƻfʣ繡楙¯ĦE勗E濞",
"observedGeneration": -8886126363972008903,
"lastTransitionTime": "2544-09-25T10:59:09Z",
"reason": "27",
"message": "28"
}
]
}
} | staging/src/k8s.io/api/testdata/HEAD/policy.v1.PodDisruptionBudget.json | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017615007527638227,
0.0001729926443658769,
0.0001711147342575714,
0.00017264166672248393,
0.0000014944776012271177
] |
{
"id": 0,
"code_window": [
"\t\"k8s.io/component-base/logs\"\n",
"\t\"k8s.io/component-base/metrics\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
"\tkubecontrollerconfig \"k8s.io/kubernetes/cmd/kube-controller-manager/app/config\"\n",
"\tkubectrlmgrconfig \"k8s.io/kubernetes/pkg/controller/apis/config\"\n",
"\tcsrsigningconfig \"k8s.io/kubernetes/pkg/controller/certificates/signer/config\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 37
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mount
import (
"fmt"
"os"
"k8s.io/klog/v2"
)
// CleanupMountPoint unmounts the given path and deletes the remaining directory
// if successful. If extensiveMountPointCheck is true IsNotMountPoint will be
// called instead of IsLikelyNotMountPoint. IsNotMountPoint is more expensive
// but properly handles bind mounts within the same fs.
func CleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool) error {
pathExists, pathErr := PathExists(mountPath)
if !pathExists {
klog.Warningf("Warning: Unmount skipped because path does not exist: %v", mountPath)
return nil
}
corruptedMnt := IsCorruptedMnt(pathErr)
if pathErr != nil && !corruptedMnt {
return fmt.Errorf("Error checking path: %v", pathErr)
}
return doCleanupMountPoint(mountPath, mounter, extensiveMountPointCheck, corruptedMnt)
}
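// A hedged usage sketch (the path and mounter are assumptions, not taken from
// this file):
//
//	if err := CleanupMountPoint("/var/lib/kubelet/pods/<uid>/volumes/foo", mounter, true); err != nil {
//		// still mounted or not removable; the caller should retry later
//	}
//
// Passing extensiveMountPointCheck=true selects the more expensive
// IsNotMountPoint check, which also handles bind mounts within the same
// filesystem.
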
// doCleanupMountPoint unmounts the given path and
// deletes the remaining directory if successful.
// if extensiveMountPointCheck is true
// IsNotMountPoint will be called instead of IsLikelyNotMountPoint.
// IsNotMountPoint is more expensive but properly handles bind mounts within the same fs.
// if corruptedMnt is true, it means that the mountPath is a corrupted mountpoint, and the mount point check
// will be skipped
func doCleanupMountPoint(mountPath string, mounter Interface, extensiveMountPointCheck bool, corruptedMnt bool) error {
var notMnt bool
var err error
if !corruptedMnt {
if extensiveMountPointCheck {
notMnt, err = IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if err != nil {
return err
}
if notMnt {
klog.Warningf("Warning: %q is not a mountpoint, deleting", mountPath)
return os.Remove(mountPath)
}
}
// Unmount the mount path
klog.V(4).Infof("%q is a mountpoint, unmounting", mountPath)
if err := mounter.Unmount(mountPath); err != nil {
return err
}
if extensiveMountPointCheck {
notMnt, err = IsNotMountPoint(mounter, mountPath)
} else {
notMnt, err = mounter.IsLikelyNotMountPoint(mountPath)
}
if err != nil {
return err
}
if notMnt {
klog.V(4).Infof("%q is unmounted, deleting the directory", mountPath)
return os.Remove(mountPath)
}
return fmt.Errorf("Failed to unmount path %v", mountPath)
}
// PathExists returns true if the specified path exists.
// TODO: clean this up to use pkg/util/file/FileExists
func PathExists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
} else if os.IsNotExist(err) {
return false, nil
} else if IsCorruptedMnt(err) {
return true, err
}
return false, err
}
| vendor/k8s.io/utils/mount/mount_helper_common.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0005857925279997289,
0.00020977872190997005,
0.0001651890343055129,
0.0001711429504211992,
0.00011898476077476516
] |
{
"id": 1,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &cpoptions.KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tUseServiceAccountCredentials: true,\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 199
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/config/options"
cmconfig "k8s.io/controller-manager/config"
migration "k8s.io/controller-manager/pkg/leadermigration/options"
)
// GenericControllerManagerConfigurationOptions holds the options which are generic.
type GenericControllerManagerConfigurationOptions struct {
*cmconfig.GenericControllerManagerConfiguration
Debugging *DebuggingOptions
// LeaderMigration is the options for leader migration, a nil indicates default options should be applied.
LeaderMigration *migration.LeaderMigrationOptions
}
// NewGenericControllerManagerConfigurationOptions returns generic configuration default values for both
// the kube-controller-manager and the cloud-controller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewGenericControllerManagerConfigurationOptions(cfg *cmconfig.GenericControllerManagerConfiguration) *GenericControllerManagerConfigurationOptions {
o := &GenericControllerManagerConfigurationOptions{
GenericControllerManagerConfiguration: cfg,
Debugging: RecommendedDebuggingOptions(),
LeaderMigration: nil,
}
return o
}
// AddFlags adds flags related to generic for controller manager to the specified FlagSet.
func (o *GenericControllerManagerConfigurationOptions) AddFlags(fss *cliflag.NamedFlagSets, allControllers, disabledByDefaultControllers []string) {
if o == nil {
return
}
o.Debugging.AddFlags(fss.FlagSet("debugging"))
o.LeaderMigration.AddFlags(fss.FlagSet("leader-migration"))
genericfs := fss.FlagSet("generic")
genericfs.DurationVar(&o.MinResyncPeriod.Duration, "min-resync-period", o.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
genericfs.StringVar(&o.ClientConnection.ContentType, "kube-api-content-type", o.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
genericfs.Float32Var(&o.ClientConnection.QPS, "kube-api-qps", o.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver.")
genericfs.Int32Var(&o.ClientConnection.Burst, "kube-api-burst", o.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver.")
genericfs.DurationVar(&o.ControllerStartInterval.Duration, "controller-start-interval", o.ControllerStartInterval.Duration, "Interval between starting controller managers.")
genericfs.StringSliceVar(&o.Controllers, "controllers", o.Controllers, fmt.Sprintf(""+
"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
options.BindLeaderElectionFlags(&o.LeaderElection, genericfs)
}
// ApplyTo fills up generic config with options.
func (o *GenericControllerManagerConfigurationOptions) ApplyTo(cfg *cmconfig.GenericControllerManagerConfiguration) error {
if o == nil {
return nil
}
if err := o.Debugging.ApplyTo(&cfg.Debugging); err != nil {
return err
}
if err := o.LeaderMigration.ApplyTo(cfg); err != nil {
return err
}
cfg.Port = o.Port
cfg.Address = o.Address
cfg.MinResyncPeriod = o.MinResyncPeriod
cfg.ClientConnection = o.ClientConnection
cfg.ControllerStartInterval = o.ControllerStartInterval
cfg.LeaderElection = o.LeaderElection
cfg.Controllers = o.Controllers
return nil
}
// Validate checks validation of GenericOptions.
func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers []string, disabledByDefaultControllers []string) []error {
if o == nil {
return nil
}
errs := []error{}
errs = append(errs, o.Debugging.Validate()...)
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.Controllers {
if controller == "*" {
continue
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
return errs
}
| staging/src/k8s.io/controller-manager/options/generic.go | 1 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0060897707007825375,
0.0006781427073292434,
0.0001628928876016289,
0.00016934573068283498,
0.001632258645258844
] |
{
"id": 1,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &cpoptions.KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tUseServiceAccountCredentials: true,\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 199
} | # See the OWNERS docs at https://go.k8s.io/owners
reviewers:
- brendandburns
- smarterclayton
- thockin
approvers:
- sig-docs-approvers
- brendandburns
- smarterclayton
- thockin
- pwittrock
| docs/OWNERS | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017425348050892353,
0.00017279366147704422,
0.0001713338278932497,
0.00017279366147704422,
0.00000145982630783692
] |
{
"id": 1,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &cpoptions.KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tUseServiceAccountCredentials: true,\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 199
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bytes"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"testing"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/clientcmd"
kubeadmapiv1beta2 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2"
outputapischeme "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/scheme"
outputapiv1alpha1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/output/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/util/output"
)
const (
tokenExpectedRegex = "^\\S{6}\\.\\S{16}\n$"
testConfigToken = `apiVersion: v1
clusters:
- cluster:
certificate-authority-data:
server: localhost:8000
name: prod
contexts:
- context:
cluster: prod
namespace: default
user: default-service-account
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data:
client-key-data:
`
)
func TestRunGenerateToken(t *testing.T) {
var buf bytes.Buffer
err := RunGenerateToken(&buf)
if err != nil {
t.Errorf("RunGenerateToken returned an error: %v", err)
}
output := buf.String()
matched, err := regexp.MatchString(tokenExpectedRegex, output)
if err != nil {
t.Fatalf("Encountered an error while trying to match RunGenerateToken's output: %v", err)
}
if !matched {
t.Errorf("RunGenerateToken's output did not match expected regex; wanted: [%s], got: [%s]", tokenExpectedRegex, output)
}
}
func TestRunCreateToken(t *testing.T) {
var buf bytes.Buffer
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, apierrors.NewNotFound(v1.Resource("secrets"), "foo")
})
testCases := []struct {
name string
token string
usages []string
extraGroups []string
printJoin bool
expectedError bool
}{
{
name: "valid: empty token",
token: "",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: false,
},
{
name: "valid: non-empty token",
token: "abcdef.1234567890123456",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: false,
},
{
name: "valid: no extraGroups",
token: "abcdef.1234567890123456",
usages: []string{"signing", "authentication"},
extraGroups: []string{},
expectedError: false,
},
{
name: "invalid: incorrect extraGroups",
token: "abcdef.1234567890123456",
usages: []string{"signing", "authentication"},
extraGroups: []string{"foo"},
expectedError: true,
},
{
name: "invalid: specifying --groups when --usages doesn't include authentication",
token: "abcdef.1234567890123456",
usages: []string{"signing"},
extraGroups: []string{"foo"},
expectedError: true,
},
{
name: "invalid: partially incorrect usages",
token: "abcdef.1234567890123456",
usages: []string{"foo", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: true,
},
{
name: "invalid: all incorrect usages",
token: "abcdef.1234567890123456",
usages: []string{"foo", "bar"},
extraGroups: []string{"system:bootstrappers:foo"},
expectedError: true,
},
{
name: "invalid: print join command",
token: "",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:foo"},
printJoin: true,
expectedError: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
bts, err := kubeadmapiv1beta2.NewBootstrapTokenString(tc.token)
if err != nil && len(tc.token) != 0 { // if tc.token is "" it's okay as it will be generated later at runtime
t.Fatalf("token couldn't be parsed for testing: %v", err)
}
cfg := &kubeadmapiv1beta2.InitConfiguration{
BootstrapTokens: []kubeadmapiv1beta2.BootstrapToken{
{
Token: bts,
TTL: &metav1.Duration{Duration: 0},
Usages: tc.usages,
Groups: tc.extraGroups,
},
},
}
err = RunCreateToken(&buf, fakeClient, "", cfg, tc.printJoin, "", "")
if tc.expectedError && err == nil {
t.Error("unexpected success")
} else if !tc.expectedError && err != nil {
t.Errorf("unexpected error: %v", err)
}
})
}
}
func TestNewCmdTokenGenerate(t *testing.T) {
var buf bytes.Buffer
args := []string{}
cmd := newCmdTokenGenerate(&buf)
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Errorf("Cannot execute token command: %v", err)
}
}
func TestNewCmdToken(t *testing.T) {
var buf, bufErr bytes.Buffer
testConfigTokenFile := "test-config-file"
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, testConfigTokenFile)
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
testCases := []struct {
name string
args []string
configToWrite string
kubeConfigEnv string
expectedError bool
}{
{
name: "valid: generate",
args: []string{"generate"},
configToWrite: "",
expectedError: false,
},
{
name: "valid: delete from --kubeconfig",
args: []string{"delete", "abcdef.1234567890123456", "--dry-run", "--kubeconfig=" + fullPath},
configToWrite: testConfigToken,
expectedError: false,
},
{
name: "valid: delete from " + clientcmd.RecommendedConfigPathEnvVar,
args: []string{"delete", "abcdef.1234567890123456", "--dry-run"},
configToWrite: testConfigToken,
kubeConfigEnv: fullPath,
expectedError: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// the command is created for each test so that the kubeConfigFile
// variable in newCmdToken() is reset.
cmd := newCmdToken(&buf, &bufErr)
if _, err = f.WriteString(tc.configToWrite); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
// store the current value of the environment variable.
storedEnv := os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
if tc.kubeConfigEnv != "" {
os.Setenv(clientcmd.RecommendedConfigPathEnvVar, tc.kubeConfigEnv)
}
cmd.SetArgs(tc.args)
err := cmd.Execute()
if (err != nil) != tc.expectedError {
t.Errorf("Test case %q: newCmdToken expected error: %v, saw: %v", tc.name, tc.expectedError, (err != nil))
}
// restore the environment variable.
os.Setenv(clientcmd.RecommendedConfigPathEnvVar, storedEnv)
})
}
}
func TestGetClientset(t *testing.T) {
testConfigTokenFile := "test-config-file"
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, testConfigTokenFile)
	// test dryRun = false on a non-existing file
	if _, err = getClientset(fullPath, false); err == nil {
		t.Errorf("getClientset(); dry-run: false; did not fail for test file %q: %v", fullPath, err)
	}
	// test dryRun = true on a non-existing file
	if _, err = getClientset(fullPath, true); err == nil {
		t.Errorf("getClientset(); dry-run: true; did not fail for test file %q: %v", fullPath, err)
}
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
if _, err = f.WriteString(testConfigToken); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
	// test dryRun = true on an existing file
if _, err = getClientset(fullPath, true); err != nil {
t.Errorf("getClientset(); dry-run: true; failed for test file %q: %v", fullPath, err)
}
}
func TestRunDeleteTokens(t *testing.T) {
var buf bytes.Buffer
tmpDir, err := ioutil.TempDir("", "kubeadm-token-test")
if err != nil {
t.Errorf("Unable to create temporary directory: %v", err)
}
defer os.RemoveAll(tmpDir)
fullPath := filepath.Join(tmpDir, "test-config-file")
f, err := os.Create(fullPath)
if err != nil {
t.Errorf("Unable to create test file %q: %v", fullPath, err)
}
defer f.Close()
if _, err = f.WriteString(testConfigToken); err != nil {
t.Errorf("Unable to write test file %q: %v", fullPath, err)
}
client, err := getClientset(fullPath, true)
if err != nil {
t.Errorf("Unable to run getClientset() for test file %q: %v", fullPath, err)
}
// test valid; should not fail
// for some reason Secrets().Delete() does not fail even for this dummy config
if err = RunDeleteTokens(&buf, client, []string{"abcdef.1234567890123456", "abcdef.2345678901234567"}); err != nil {
t.Errorf("RunDeleteToken() failed for a valid token: %v", err)
}
// test invalid token; should fail
if err = RunDeleteTokens(&buf, client, []string{"invalid-token"}); err == nil {
t.Errorf("RunDeleteToken() succeeded for an invalid token: %v", err)
}
}
func TestTokenOutput(t *testing.T) {
testCases := []struct {
name string
id string
secret string
description string
usages []string
extraGroups []string
outputFormat string
expected string
}{
{
name: "JSON output",
id: "abcdef",
secret: "1234567890123456",
description: "valid bootstrap tooken",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:kubeadm:default-node-token"},
outputFormat: "json",
expected: `{
"kind": "BootstrapToken",
"apiVersion": "output.kubeadm.k8s.io/v1alpha1",
"token": "abcdef.1234567890123456",
"description": "valid bootstrap tooken",
"usages": [
"signing",
"authentication"
],
"groups": [
"system:bootstrappers:kubeadm:default-node-token"
]
}
`,
},
{
name: "YAML output",
id: "abcdef",
secret: "1234567890123456",
description: "valid bootstrap tooken",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:kubeadm:default-node-token"},
outputFormat: "yaml",
expected: `apiVersion: output.kubeadm.k8s.io/v1alpha1
description: valid bootstrap tooken
groups:
- system:bootstrappers:kubeadm:default-node-token
kind: BootstrapToken
token: abcdef.1234567890123456
usages:
- signing
- authentication
`,
},
{
name: "Go template output",
id: "abcdef",
secret: "1234567890123456",
description: "valid bootstrap tooken",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:kubeadm:default-node-token"},
outputFormat: "go-template={{println .token .description .usages .groups}}",
expected: `abcdef.1234567890123456 valid bootstrap tooken [signing authentication] [system:bootstrappers:kubeadm:default-node-token]
`,
},
{
name: "text output",
id: "abcdef",
secret: "1234567890123456",
description: "valid bootstrap tooken",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:kubeadm:default-node-token"},
outputFormat: "text",
expected: `TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS
abcdef.1234567890123456 <forever> <never> signing,authentication valid bootstrap tooken system:bootstrappers:kubeadm:default-node-token
`,
},
{
name: "jsonpath output",
id: "abcdef",
secret: "1234567890123456",
description: "valid bootstrap tooken",
usages: []string{"signing", "authentication"},
extraGroups: []string{"system:bootstrappers:kubeadm:default-node-token"},
outputFormat: "jsonpath={.token} {.groups}",
expected: "abcdef.1234567890123456 [\"system:bootstrappers:kubeadm:default-node-token\"]",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
token := outputapiv1alpha1.BootstrapToken{
BootstrapToken: kubeadmapiv1beta2.BootstrapToken{
Token: &kubeadmapiv1beta2.BootstrapTokenString{ID: tc.id, Secret: tc.secret},
Description: tc.description,
Usages: tc.usages,
Groups: tc.extraGroups,
},
}
buf := bytes.Buffer{}
outputFlags := output.NewOutputFlags(&tokenTextPrintFlags{}).WithTypeSetter(outputapischeme.Scheme).WithDefaultOutput(tc.outputFormat)
printer, err := outputFlags.ToPrinter()
if err != nil {
t.Errorf("can't create printer for output format %s: %+v", tc.outputFormat, err)
}
if err := printer.PrintObj(&token, &buf); err != nil {
t.Errorf("unable to print token %s: %+v", token.Token, err)
}
actual := buf.String()
if actual != tc.expected {
t.Errorf(
"failed TestTokenOutput:\n\nexpected:\n%s\n\nactual:\n%s", tc.expected, actual)
}
})
}
}
| cmd/kubeadm/app/cmd/token_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017749532707966864,
0.00017070242029149085,
0.00016175603377632797,
0.00017161629511974752,
0.0000034607405723363627
] |
{
"id": 1,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &cpoptions.KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tUseServiceAccountCredentials: true,\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "cmd/kube-controller-manager/app/options/options_test.go",
"type": "add",
"edit_start_line_idx": 199
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eviction
import (
"fmt"
"sort"
"sync"
"time"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/clock"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/tools/record"
v1helper "k8s.io/component-helpers/scheduling/corev1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
apiv1resource "k8s.io/kubernetes/pkg/api/v1/resource"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
const (
podCleanupTimeout = 30 * time.Second
podCleanupPollFreq = time.Second
)
const (
// signalEphemeralContainerFsLimit is amount of storage available on filesystem requested by the container
signalEphemeralContainerFsLimit string = "ephemeralcontainerfs.limit"
// signalEphemeralPodFsLimit is amount of storage available on filesystem requested by the pod
signalEphemeralPodFsLimit string = "ephemeralpodfs.limit"
// signalEmptyDirFsLimit is amount of storage available on filesystem requested by an emptyDir
signalEmptyDirFsLimit string = "emptydirfs.limit"
)
// managerImpl implements Manager
type managerImpl struct {
// used to track time
clock clock.Clock
// config is how the manager is configured
config Config
// the function to invoke to kill a pod
killPodFunc KillPodFunc
	// the function to get the mirror pod for a given static pod
mirrorPodFunc MirrorPodFunc
// the interface that knows how to do image gc
imageGC ImageGC
// the interface that knows how to do container gc
containerGC ContainerGC
// protects access to internal state
sync.RWMutex
// node conditions are the set of conditions present
nodeConditions []v1.NodeConditionType
// captures when a node condition was last observed based on a threshold being met
nodeConditionsLastObservedAt nodeConditionsObservedAt
// nodeRef is a reference to the node
nodeRef *v1.ObjectReference
// used to record events about the node
recorder record.EventRecorder
// used to measure usage stats on system
summaryProvider stats.SummaryProvider
// records when a threshold was first observed
thresholdsFirstObservedAt thresholdsObservedAt
// records the set of thresholds that have been met (including graceperiod) but not yet resolved
thresholdsMet []evictionapi.Threshold
// signalToRankFunc maps a resource to ranking function for that resource.
signalToRankFunc map[evictionapi.Signal]rankFunc
// signalToNodeReclaimFuncs maps a resource to an ordered list of functions that know how to reclaim that resource.
signalToNodeReclaimFuncs map[evictionapi.Signal]nodeReclaimFuncs
// last observations from synchronize
lastObservations signalObservations
// dedicatedImageFs indicates if imagefs is on a separate device from the rootfs
dedicatedImageFs *bool
// thresholdNotifiers is a list of memory threshold notifiers which each notify for a memory eviction threshold
thresholdNotifiers []ThresholdNotifier
// thresholdsLastUpdated is the last time the thresholdNotifiers were updated.
thresholdsLastUpdated time.Time
}
// ensure it implements the required interface
var _ Manager = &managerImpl{}
// NewManager returns a configured Manager and an associated admission handler to enforce eviction configuration.
func NewManager(
summaryProvider stats.SummaryProvider,
config Config,
killPodFunc KillPodFunc,
mirrorPodFunc MirrorPodFunc,
imageGC ImageGC,
containerGC ContainerGC,
recorder record.EventRecorder,
nodeRef *v1.ObjectReference,
clock clock.Clock,
) (Manager, lifecycle.PodAdmitHandler) {
manager := &managerImpl{
clock: clock,
killPodFunc: killPodFunc,
mirrorPodFunc: mirrorPodFunc,
imageGC: imageGC,
containerGC: containerGC,
config: config,
recorder: recorder,
summaryProvider: summaryProvider,
nodeRef: nodeRef,
nodeConditionsLastObservedAt: nodeConditionsObservedAt{},
thresholdsFirstObservedAt: thresholdsObservedAt{},
dedicatedImageFs: nil,
thresholdNotifiers: []ThresholdNotifier{},
}
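	// the same managerImpl instance serves as both the eviction Manager and the pod admission handler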
return manager, manager
}
// Admit rejects a pod if it's not safe to admit for node stability.
func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
m.RLock()
defer m.RUnlock()
if len(m.nodeConditions) == 0 {
return lifecycle.PodAdmitResult{Admit: true}
}
// Admit Critical pods even under resource pressure since they are required for system stability.
// https://github.com/kubernetes/kubernetes/issues/40573 has more details.
if kubelettypes.IsCriticalPod(attrs.Pod) {
return lifecycle.PodAdmitResult{Admit: true}
}
// Conditions other than memory pressure reject all pods
nodeOnlyHasMemoryPressureCondition := hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) && len(m.nodeConditions) == 1
if nodeOnlyHasMemoryPressureCondition {
notBestEffort := v1.PodQOSBestEffort != v1qos.GetPodQOS(attrs.Pod)
if notBestEffort {
return lifecycle.PodAdmitResult{Admit: true}
}
// When node has memory pressure, check BestEffort Pod's toleration:
		// admit it if it tolerates the memory pressure taint, fail for other tolerations, e.g. DiskPressure.
if v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{
Key: v1.TaintNodeMemoryPressure,
Effect: v1.TaintEffectNoSchedule,
}) {
return lifecycle.PodAdmitResult{Admit: true}
}
}
// reject pods when under memory pressure (if pod is best effort), or if under disk pressure.
klog.InfoS("Failed to admit pod to node", "pod", klog.KObj(attrs.Pod), "nodeCondition", m.nodeConditions)
return lifecycle.PodAdmitResult{
Admit: false,
Reason: Reason,
Message: fmt.Sprintf(nodeConditionMessageFmt, m.nodeConditions),
}
}
// Start starts the control loop to observe and respond to low compute resources.
func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, podCleanedUpFunc PodCleanedUpFunc, monitoringInterval time.Duration) {
thresholdHandler := func(message string) {
klog.InfoS(message)
m.synchronize(diskInfoProvider, podFunc)
}
if m.config.KernelMemcgNotification {
for _, threshold := range m.config.Thresholds {
if threshold.Signal == evictionapi.SignalMemoryAvailable || threshold.Signal == evictionapi.SignalAllocatableMemoryAvailable {
notifier, err := NewMemoryThresholdNotifier(threshold, m.config.PodCgroupRoot, &CgroupNotifierFactory{}, thresholdHandler)
if err != nil {
klog.InfoS("Eviction manager: failed to create memory threshold notifier", "err", err)
} else {
go notifier.Start()
m.thresholdNotifiers = append(m.thresholdNotifiers, notifier)
}
}
}
}
// start the eviction manager monitoring
go func() {
for {
if evictedPods := m.synchronize(diskInfoProvider, podFunc); evictedPods != nil {
klog.InfoS("Eviction manager: pods evicted, waiting for pod to be cleaned up", "pods", format.Pods(evictedPods))
m.waitForPodsCleanup(podCleanedUpFunc, evictedPods)
} else {
time.Sleep(monitoringInterval)
}
}
}()
}
// IsUnderMemoryPressure returns true if the node is under memory pressure.
func (m *managerImpl) IsUnderMemoryPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure)
}
// IsUnderDiskPressure returns true if the node is under disk pressure.
func (m *managerImpl) IsUnderDiskPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, v1.NodeDiskPressure)
}
// IsUnderPIDPressure returns true if the node is under PID pressure.
func (m *managerImpl) IsUnderPIDPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, v1.NodePIDPressure)
}
// synchronize is the main control loop that enforces eviction thresholds.
// Returns the pods that were killed, or nil if no pods were killed.
func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) []*v1.Pod {
// if we have nothing to do, just return
thresholds := m.config.Thresholds
if len(thresholds) == 0 && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
return nil
}
klog.V(3).InfoS("Eviction manager: synchronize housekeeping")
// build the ranking functions (if not yet known)
// TODO: have a function in cadvisor that lets us know if global housekeeping has completed
if m.dedicatedImageFs == nil {
hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs()
if ok != nil {
return nil
}
m.dedicatedImageFs = &hasImageFs
m.signalToRankFunc = buildSignalToRankFunc(hasImageFs)
m.signalToNodeReclaimFuncs = buildSignalToNodeReclaimFuncs(m.imageGC, m.containerGC, hasImageFs)
}
activePods := podFunc()
updateStats := true
summary, err := m.summaryProvider.Get(updateStats)
if err != nil {
klog.ErrorS(err, "Eviction manager: failed to get summary stats")
return nil
}
if m.clock.Since(m.thresholdsLastUpdated) > notifierRefreshInterval {
m.thresholdsLastUpdated = m.clock.Now()
for _, notifier := range m.thresholdNotifiers {
if err := notifier.UpdateThreshold(summary); err != nil {
klog.InfoS("Eviction manager: failed to update notifier", "notifier", notifier.Description(), "err", err)
}
}
}
// make observations and get a function to derive pod usage stats relative to those observations.
observations, statsFunc := makeSignalObservations(summary)
debugLogObservations("observations", observations)
// determine the set of thresholds met independent of grace period
thresholds = thresholdsMet(thresholds, observations, false)
debugLogThresholdsWithObservation("thresholds - ignoring grace period", thresholds, observations)
// determine the set of thresholds previously met that have not yet satisfied the associated min-reclaim
if len(m.thresholdsMet) > 0 {
thresholdsNotYetResolved := thresholdsMet(m.thresholdsMet, observations, true)
thresholds = mergeThresholds(thresholds, thresholdsNotYetResolved)
}
debugLogThresholdsWithObservation("thresholds - reclaim not satisfied", thresholds, observations)
// track when a threshold was first observed
now := m.clock.Now()
thresholdsFirstObservedAt := thresholdsFirstObservedAt(thresholds, m.thresholdsFirstObservedAt, now)
// the set of node conditions that are triggered by currently observed thresholds
nodeConditions := nodeConditions(thresholds)
if len(nodeConditions) > 0 {
klog.V(3).InfoS("Eviction manager: node conditions - observed", "nodeCondition", nodeConditions)
}
// track when a node condition was last observed
nodeConditionsLastObservedAt := nodeConditionsLastObservedAt(nodeConditions, m.nodeConditionsLastObservedAt, now)
	// node conditions report true if they have been observed within the transition period window
nodeConditions = nodeConditionsObservedSince(nodeConditionsLastObservedAt, m.config.PressureTransitionPeriod, now)
if len(nodeConditions) > 0 {
klog.V(3).InfoS("Eviction manager: node conditions - transition period not met", "nodeCondition", nodeConditions)
}
// determine the set of thresholds we need to drive eviction behavior (i.e. all grace periods are met)
thresholds = thresholdsMetGracePeriod(thresholdsFirstObservedAt, now)
debugLogThresholdsWithObservation("thresholds - grace periods satisfied", thresholds, observations)
// update internal state
m.Lock()
m.nodeConditions = nodeConditions
m.thresholdsFirstObservedAt = thresholdsFirstObservedAt
m.nodeConditionsLastObservedAt = nodeConditionsLastObservedAt
m.thresholdsMet = thresholds
// determine the set of thresholds whose stats have been updated since the last sync
thresholds = thresholdsUpdatedStats(thresholds, observations, m.lastObservations)
debugLogThresholdsWithObservation("thresholds - updated stats", thresholds, observations)
m.lastObservations = observations
m.Unlock()
// evict pods if there is a resource usage violation from local volume temporary storage
// If eviction happens in localStorageEviction function, skip the rest of eviction action
if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
if evictedPods := m.localStorageEviction(activePods, statsFunc); len(evictedPods) > 0 {
return evictedPods
}
}
if len(thresholds) == 0 {
klog.V(3).InfoS("Eviction manager: no resources are starved")
return nil
}
// rank the thresholds by eviction priority
sort.Sort(byEvictionPriority(thresholds))
thresholdToReclaim, resourceToReclaim, foundAny := getReclaimableThreshold(thresholds)
if !foundAny {
return nil
}
klog.InfoS("Eviction manager: attempting to reclaim", "resourceName", resourceToReclaim)
// record an event about the resources we are now attempting to reclaim via eviction
m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
// check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods.
if m.reclaimNodeLevelResources(thresholdToReclaim.Signal, resourceToReclaim) {
klog.InfoS("Eviction manager: able to reduce resource pressure without evicting pods.", "resourceName", resourceToReclaim)
return nil
}
klog.InfoS("Eviction manager: must evict pod(s) to reclaim", "resourceName", resourceToReclaim)
// rank the pods for eviction
rank, ok := m.signalToRankFunc[thresholdToReclaim.Signal]
if !ok {
klog.ErrorS(nil, "Eviction manager: no ranking function for signal", "threshold", thresholdToReclaim.Signal)
return nil
}
	// the only viable eviction candidates are the currently active pods; bail out if there are none.
if len(activePods) == 0 {
klog.ErrorS(nil, "Eviction manager: eviction thresholds have been met, but no pods are active to evict")
return nil
}
// rank the running pods for eviction for the specified resource
rank(activePods, statsFunc)
klog.InfoS("Eviction manager: pods ranked for eviction", "pods", format.Pods(activePods))
	// record the age of metrics for met thresholds that we are using for evictions.
for _, t := range thresholds {
timeObserved := observations[t.Signal].time
if !timeObserved.IsZero() {
metrics.EvictionStatsAge.WithLabelValues(string(t.Signal)).Observe(metrics.SinceInSeconds(timeObserved.Time))
}
}
// we kill at most a single pod during each eviction interval
for i := range activePods {
pod := activePods[i]
gracePeriodOverride := int64(0)
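		// hard thresholds evict with no grace period; soft thresholds allow up to the configured maximum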
if !isHardEvictionThreshold(thresholdToReclaim) {
gracePeriodOverride = m.config.MaxPodGracePeriodSeconds
}
message, annotations := evictionMessage(resourceToReclaim, pod, statsFunc)
if m.evictPod(pod, gracePeriodOverride, message, annotations) {
metrics.Evictions.WithLabelValues(string(thresholdToReclaim.Signal)).Inc()
return []*v1.Pod{pod}
}
}
klog.InfoS("Eviction manager: unable to evict any pods from the node")
return nil
}
func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods []*v1.Pod) {
timeout := m.clock.NewTimer(podCleanupTimeout)
defer timeout.Stop()
ticker := m.clock.NewTicker(podCleanupPollFreq)
defer ticker.Stop()
for {
select {
case <-timeout.C():
klog.InfoS("Eviction manager: timed out waiting for pods to be cleaned up", "pods", format.Pods(pods))
return
case <-ticker.C():
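			// on each tick, re-check every evicted pod; return only once all of them have been cleaned up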
for i, pod := range pods {
if !podCleanedUpFunc(pod) {
break
}
if i == len(pods)-1 {
klog.InfoS("Eviction manager: pods successfully cleaned up", "pods", format.Pods(pods))
return
}
}
}
}
}
// reclaimNodeLevelResources attempts to reclaim node-level resources. Returns true if thresholds were satisfied and no pod eviction is required.
func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool {
nodeReclaimFuncs := m.signalToNodeReclaimFuncs[signalToReclaim]
for _, nodeReclaimFunc := range nodeReclaimFuncs {
// attempt to reclaim the pressured resource.
if err := nodeReclaimFunc(); err != nil {
klog.InfoS("Eviction manager: unexpected error when attempting to reduce resource pressure", "resourceName", resourceToReclaim, "err", err)
}
}
if len(nodeReclaimFuncs) > 0 {
summary, err := m.summaryProvider.Get(true)
if err != nil {
klog.ErrorS(err, "Eviction manager: failed to get summary stats after resource reclaim")
return false
}
// make observations and get a function to derive pod usage stats relative to those observations.
observations, _ := makeSignalObservations(summary)
debugLogObservations("observations after resource reclaim", observations)
// determine the set of thresholds met independent of grace period
thresholds := thresholdsMet(m.config.Thresholds, observations, false)
debugLogThresholdsWithObservation("thresholds after resource reclaim - ignoring grace period", thresholds, observations)
if len(thresholds) == 0 {
return true
}
}
return false
}
// localStorageEviction checks the EmptyDir volume usage for each pod and determines whether it exceeds the specified limit and needs
// to be evicted. It also checks every container in the pod; if the container overlay usage exceeds the limit, the pod will be evicted too.
func (m *managerImpl) localStorageEviction(pods []*v1.Pod, statsFunc statsFunc) []*v1.Pod {
evicted := []*v1.Pod{}
for _, pod := range pods {
podStats, ok := statsFunc(pod)
if !ok {
continue
}
if m.emptyDirLimitEviction(podStats, pod) {
evicted = append(evicted, pod)
continue
}
if m.podEphemeralStorageLimitEviction(podStats, pod) {
evicted = append(evicted, pod)
continue
}
if m.containerEphemeralStorageLimitEviction(podStats, pod) {
evicted = append(evicted, pod)
}
}
return evicted
}
func (m *managerImpl) emptyDirLimitEviction(podStats statsapi.PodStats, pod *v1.Pod) bool {
podVolumeUsed := make(map[string]*resource.Quantity)
for _, volume := range podStats.VolumeStats {
podVolumeUsed[volume.Name] = resource.NewQuantity(int64(*volume.UsedBytes), resource.BinarySI)
}
for i := range pod.Spec.Volumes {
source := &pod.Spec.Volumes[i].VolumeSource
if source.EmptyDir != nil {
size := source.EmptyDir.SizeLimit
used := podVolumeUsed[pod.Spec.Volumes[i].Name]
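			// only enforce when a positive sizeLimit is set and the observed usage exceeds it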
if used != nil && size != nil && size.Sign() == 1 && used.Cmp(*size) > 0 {
// the emptyDir usage exceeds the size limit, evict the pod
if m.evictPod(pod, 0, fmt.Sprintf(emptyDirMessageFmt, pod.Spec.Volumes[i].Name, size.String()), nil) {
metrics.Evictions.WithLabelValues(signalEmptyDirFsLimit).Inc()
return true
}
return false
}
}
}
return false
}
func (m *managerImpl) podEphemeralStorageLimitEviction(podStats statsapi.PodStats, pod *v1.Pod) bool {
_, podLimits := apiv1resource.PodRequestsAndLimits(pod)
_, found := podLimits[v1.ResourceEphemeralStorage]
if !found {
return false
}
// pod stats api summarizes ephemeral storage usage (container, emptyDir, host[etc-hosts, logs])
podEphemeralStorageTotalUsage := &resource.Quantity{}
if podStats.EphemeralStorage != nil && podStats.EphemeralStorage.UsedBytes != nil {
podEphemeralStorageTotalUsage = resource.NewQuantity(int64(*podStats.EphemeralStorage.UsedBytes), resource.BinarySI)
}
podEphemeralStorageLimit := podLimits[v1.ResourceEphemeralStorage]
if podEphemeralStorageTotalUsage.Cmp(podEphemeralStorageLimit) > 0 {
		// the pod's total ephemeral storage usage exceeds the sum of its containers' limits, evict the pod
if m.evictPod(pod, 0, fmt.Sprintf(podEphemeralStorageMessageFmt, podEphemeralStorageLimit.String()), nil) {
metrics.Evictions.WithLabelValues(signalEphemeralPodFsLimit).Inc()
return true
}
return false
}
return false
}
func (m *managerImpl) containerEphemeralStorageLimitEviction(podStats statsapi.PodStats, pod *v1.Pod) bool {
thresholdsMap := make(map[string]*resource.Quantity)
for _, container := range pod.Spec.Containers {
ephemeralLimit := container.Resources.Limits.StorageEphemeral()
if ephemeralLimit != nil && ephemeralLimit.Value() != 0 {
thresholdsMap[container.Name] = ephemeralLimit
}
}
for _, containerStat := range podStats.Containers {
containerUsed := diskUsage(containerStat.Logs)
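		// without a dedicated image filesystem, the container rootfs shares the node filesystem, so count it toward ephemeral usage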
if !*m.dedicatedImageFs {
containerUsed.Add(*diskUsage(containerStat.Rootfs))
}
if ephemeralStorageThreshold, ok := thresholdsMap[containerStat.Name]; ok {
if ephemeralStorageThreshold.Cmp(*containerUsed) < 0 {
if m.evictPod(pod, 0, fmt.Sprintf(containerEphemeralStorageMessageFmt, containerStat.Name, ephemeralStorageThreshold.String()), nil) {
metrics.Evictions.WithLabelValues(signalEphemeralContainerFsLimit).Inc()
return true
}
return false
}
}
}
return false
}
func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg string, annotations map[string]string) bool {
// If the pod is marked as critical and static, and support for critical pod annotations is enabled,
// do not evict such pods. Static pods are not re-admitted after evictions.
// https://github.com/kubernetes/kubernetes/issues/40573 has more details.
if kubelettypes.IsCriticalPod(pod) {
klog.ErrorS(nil, "Eviction manager: cannot evict a critical pod", "pod", klog.KObj(pod))
return false
}
status := v1.PodStatus{
Phase: v1.PodFailed,
Message: evictMsg,
Reason: Reason,
}
// record that we are evicting the pod
m.recorder.AnnotatedEventf(pod, annotations, v1.EventTypeWarning, Reason, evictMsg)
// this is a blocking call and should only return when the pod and its containers are killed.
err := m.killPodFunc(pod, status, &gracePeriodOverride)
if err != nil {
klog.ErrorS(err, "Eviction manager: pod failed to evict", "pod", klog.KObj(pod))
} else {
klog.InfoS("Eviction manager: pod is evicted successfully", "pod", klog.KObj(pod))
}
return true
}
| pkg/kubelet/eviction/eviction_manager.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017799140186980367,
0.00016976067854557186,
0.00015841537970118225,
0.00017017481150105596,
0.000004177956725470722
] |
{
"id": 2,
"code_window": [
"\tserviceconfig \"k8s.io/cloud-provider/controllers/service/config\"\n",
"\tcomponentbaseconfig \"k8s.io/component-base/config\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
")\n",
"\n",
"func TestDefaultFlags(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 33
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/config/options"
cmconfig "k8s.io/controller-manager/config"
migration "k8s.io/controller-manager/pkg/leadermigration/options"
)
// GenericControllerManagerConfigurationOptions holds the options which are generic.
type GenericControllerManagerConfigurationOptions struct {
*cmconfig.GenericControllerManagerConfiguration
Debugging *DebuggingOptions
// LeaderMigration is the options for leader migration, a nil indicates default options should be applied.
LeaderMigration *migration.LeaderMigrationOptions
}
// NewGenericControllerManagerConfigurationOptions returns generic configuration default values for both
// the kube-controller-manager and the cloud-contoller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewGenericControllerManagerConfigurationOptions(cfg *cmconfig.GenericControllerManagerConfiguration) *GenericControllerManagerConfigurationOptions {
o := &GenericControllerManagerConfigurationOptions{
GenericControllerManagerConfiguration: cfg,
Debugging: RecommendedDebuggingOptions(),
LeaderMigration: nil,
}
return o
}
// AddFlags adds flags related to generic for controller manager to the specified FlagSet.
func (o *GenericControllerManagerConfigurationOptions) AddFlags(fss *cliflag.NamedFlagSets, allControllers, disabledByDefaultControllers []string) {
if o == nil {
return
}
o.Debugging.AddFlags(fss.FlagSet("debugging"))
o.LeaderMigration.AddFlags(fss.FlagSet("leader-migration"))
genericfs := fss.FlagSet("generic")
genericfs.DurationVar(&o.MinResyncPeriod.Duration, "min-resync-period", o.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
genericfs.StringVar(&o.ClientConnection.ContentType, "kube-api-content-type", o.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
genericfs.Float32Var(&o.ClientConnection.QPS, "kube-api-qps", o.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver.")
genericfs.Int32Var(&o.ClientConnection.Burst, "kube-api-burst", o.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver.")
genericfs.DurationVar(&o.ControllerStartInterval.Duration, "controller-start-interval", o.ControllerStartInterval.Duration, "Interval between starting controller managers.")
genericfs.StringSliceVar(&o.Controllers, "controllers", o.Controllers, fmt.Sprintf(""+
"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
options.BindLeaderElectionFlags(&o.LeaderElection, genericfs)
}
// ApplyTo fills up generic config with options.
func (o *GenericControllerManagerConfigurationOptions) ApplyTo(cfg *cmconfig.GenericControllerManagerConfiguration) error {
if o == nil {
return nil
}
if err := o.Debugging.ApplyTo(&cfg.Debugging); err != nil {
return err
}
if err := o.LeaderMigration.ApplyTo(cfg); err != nil {
return err
}
cfg.Port = o.Port
cfg.Address = o.Address
cfg.MinResyncPeriod = o.MinResyncPeriod
cfg.ClientConnection = o.ClientConnection
cfg.ControllerStartInterval = o.ControllerStartInterval
cfg.LeaderElection = o.LeaderElection
cfg.Controllers = o.Controllers
return nil
}
// Validate checks validation of GenericOptions.
func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers []string, disabledByDefaultControllers []string) []error {
if o == nil {
return nil
}
errs := []error{}
errs = append(errs, o.Debugging.Validate()...)
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.Controllers {
if controller == "*" {
continue
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
return errs
}
| staging/src/k8s.io/controller-manager/options/generic.go | 1 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0030019599944353104,
0.0007986955461092293,
0.00016969285206869245,
0.00020763531210832298,
0.0009586758678779006
] |
{
"id": 2,
"code_window": [
"\tserviceconfig \"k8s.io/cloud-provider/controllers/service/config\"\n",
"\tcomponentbaseconfig \"k8s.io/component-base/config\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
")\n",
"\n",
"func TestDefaultFlags(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 33
} | /*
Copyright (c) 2018 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tags
import (
"context"
"fmt"
"net/http"
"github.com/vmware/govmomi/vapi/internal"
"github.com/vmware/govmomi/vim25/mo"
)
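// tagID resolves a tag name to its ID; values that are already IDs are returned unchanged.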
func (c *Manager) tagID(ctx context.Context, id string) (string, error) {
if isName(id) {
tag, err := c.GetTag(ctx, id)
if err != nil {
return "", err
}
return tag.ID, nil
}
return id, nil
}
// AttachTag attaches a tag ID to a managed object.
func (c *Manager) AttachTag(ctx context.Context, tagID string, ref mo.Reference) error {
id, err := c.tagID(ctx, tagID)
if err != nil {
return err
}
spec := internal.NewAssociation(ref)
url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("attach")
return c.Do(ctx, url.Request(http.MethodPost, spec), nil)
}
// DetachTag detaches a tag ID from a managed object.
// If the tag is already removed from the object, then this operation is a no-op and an error will not be thrown.
func (c *Manager) DetachTag(ctx context.Context, tagID string, ref mo.Reference) error {
id, err := c.tagID(ctx, tagID)
if err != nil {
return err
}
spec := internal.NewAssociation(ref)
url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("detach")
return c.Do(ctx, url.Request(http.MethodPost, spec), nil)
}
// ListAttachedTags fetches the array of tag IDs attached to the given object.
func (c *Manager) ListAttachedTags(ctx context.Context, ref mo.Reference) ([]string, error) {
spec := internal.NewAssociation(ref)
url := internal.URL(c, internal.AssociationPath).WithAction("list-attached-tags")
var res []string
return res, c.Do(ctx, url.Request(http.MethodPost, spec), &res)
}
// GetAttachedTags fetches the array of tags attached to the given object.
func (c *Manager) GetAttachedTags(ctx context.Context, ref mo.Reference) ([]Tag, error) {
ids, err := c.ListAttachedTags(ctx, ref)
if err != nil {
return nil, fmt.Errorf("get attached tags %s: %s", ref, err)
}
var info []Tag
for _, id := range ids {
tag, err := c.GetTag(ctx, id)
if err != nil {
return nil, fmt.Errorf("get tag %s: %s", id, err)
}
info = append(info, *tag)
}
return info, nil
}
// ListAttachedObjects fetches the array of attached objects for the given tag ID.
func (c *Manager) ListAttachedObjects(ctx context.Context, tagID string) ([]mo.Reference, error) {
id, err := c.tagID(ctx, tagID)
if err != nil {
return nil, err
}
url := internal.URL(c, internal.AssociationPath).WithID(id).WithAction("list-attached-objects")
var res []internal.AssociatedObject
if err := c.Do(ctx, url.Request(http.MethodPost, nil), &res); err != nil {
return nil, err
}
refs := make([]mo.Reference, len(res))
for i := range res {
refs[i] = res[i]
}
return refs, nil
}
| vendor/github.com/vmware/govmomi/vapi/tags/tag_association.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017834643949754536,
0.00017261008906643838,
0.0001652993232710287,
0.00017281559121329337,
0.0000043089276005048305
] |
{
"id": 2,
"code_window": [
"\tserviceconfig \"k8s.io/cloud-provider/controllers/service/config\"\n",
"\tcomponentbaseconfig \"k8s.io/component-base/config\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
")\n",
"\n",
"func TestDefaultFlags(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 33
} | // +build !windows
package godirwalk
import (
"os"
"syscall"
"unsafe"
)
// Scanner is an iterator to enumerate the contents of a directory.
type Scanner struct {
scratchBuffer []byte // read directory bytes from file system into this buffer
workBuffer []byte // points into scratchBuffer, from which we chunk out directory entries
osDirname string
childName string
err error // err is the error associated with scanning directory
statErr error // statErr is any error return while attempting to stat an entry
dh *os.File // used to close directory after done reading
de *Dirent // most recently decoded directory entry
sde syscall.Dirent
fd int // file descriptor used to read entries from directory
}
// NewScanner returns a new directory Scanner that lazily enumerates the
// contents of a single directory.
//
// scanner, err := godirwalk.NewScanner(dirname)
// if err != nil {
// fatal("cannot scan directory: %s", err)
// }
//
// for scanner.Scan() {
// dirent, err := scanner.Dirent()
// if err != nil {
// warning("cannot get dirent: %s", err)
// continue
// }
// name := dirent.Name()
// if name == "break" {
// break
// }
// if name == "continue" {
// continue
// }
// fmt.Printf("%v %v\n", dirent.ModeType(), dirent.Name())
// }
// if err := scanner.Err(); err != nil {
// fatal("cannot scan directory: %s", err)
// }
func NewScanner(osDirname string) (*Scanner, error) {
return NewScannerWithScratchBuffer(osDirname, nil)
}
// NewScannerWithScratchBuffer returns a new directory Scanner that lazily
// enumerates the contents of a single directory. On platforms other than
// Windows it uses the provided scratch buffer to read from the file system. On
// Windows the scratch buffer is ignored.
func NewScannerWithScratchBuffer(osDirname string, scratchBuffer []byte) (*Scanner, error) {
dh, err := os.Open(osDirname)
if err != nil {
return nil, err
}
if len(scratchBuffer) < MinimumScratchBufferSize {
scratchBuffer = newScratchBuffer()
}
scanner := &Scanner{
scratchBuffer: scratchBuffer,
osDirname: osDirname,
dh: dh,
fd: int(dh.Fd()),
}
return scanner, nil
}
// Dirent returns the current directory entry while scanning a directory.
func (s *Scanner) Dirent() (*Dirent, error) {
if s.de == nil {
s.de = &Dirent{name: s.childName, path: s.osDirname}
s.de.modeType, s.statErr = modeTypeFromDirent(&s.sde, s.osDirname, s.childName)
}
return s.de, s.statErr
}
// done is called when the directory scanner is unable to continue, with either the
// triggering error, or nil when there are simply no more entries to read from
// the directory.
func (s *Scanner) done(err error) {
if s.dh == nil {
return
}
if cerr := s.dh.Close(); err == nil {
s.err = cerr
}
s.osDirname, s.childName = "", ""
s.scratchBuffer, s.workBuffer = nil, nil
s.dh, s.de, s.statErr = nil, nil, nil
s.sde = syscall.Dirent{}
s.fd = 0
}
// Err returns any error associated with scanning a directory. It is normal to
// call Err after Scan returns false, even though they both ensure Scanner
// resources are released. Do not call until done scanning a directory.
func (s *Scanner) Err() error {
s.done(nil)
return s.err
}
// Name returns the base name of the current directory entry while scanning a
// directory.
func (s *Scanner) Name() string { return s.childName }
// Scan potentially reads and then decodes the next directory entry from the
// file system.
//
// When it returns false, this releases resources used by the Scanner then
// returns any error associated with closing the file system directory resource.
func (s *Scanner) Scan() bool {
if s.dh == nil {
return false
}
s.de = nil
for {
// When the work buffer has nothing remaining to decode, we need to load
// more data from disk.
if len(s.workBuffer) == 0 {
n, err := syscall.ReadDirent(s.fd, s.scratchBuffer)
// n, err := unix.ReadDirent(s.fd, s.scratchBuffer)
if err != nil {
if err == syscall.EINTR /* || err == unix.EINTR */ {
continue
}
s.done(err)
return false
}
if n <= 0 { // end of directory: normal exit
s.done(nil)
return false
}
s.workBuffer = s.scratchBuffer[:n] // trim work buffer to number of bytes read
}
// point entry to first syscall.Dirent in buffer
copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(&s.sde))[:], s.workBuffer)
s.workBuffer = s.workBuffer[reclen(&s.sde):] // advance buffer for next iteration through loop
if inoFromDirent(&s.sde) == 0 {
continue // inode set to 0 indicates an entry that was marked as deleted
}
nameSlice := nameFromDirent(&s.sde)
nameLength := len(nameSlice)
if nameLength == 0 || (nameSlice[0] == '.' && (nameLength == 1 || (nameLength == 2 && nameSlice[1] == '.'))) {
continue
}
s.childName = string(nameSlice)
return true
}
}
| vendor/github.com/karrick/godirwalk/scandir_unix.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00039060405106283724,
0.00018482677114661783,
0.00016153886099345982,
0.00017505117284599692,
0.00005169810901861638
] |
{
"id": 2,
"code_window": [
"\tserviceconfig \"k8s.io/cloud-provider/controllers/service/config\"\n",
"\tcomponentbaseconfig \"k8s.io/component-base/config\"\n",
"\tcmconfig \"k8s.io/controller-manager/config\"\n",
"\tcmoptions \"k8s.io/controller-manager/options\"\n",
")\n",
"\n",
"func TestDefaultFlags(t *testing.T) {\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tmigration \"k8s.io/controller-manager/pkg/leadermigration/options\"\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 33
} | inverseRules:
# Allow Internal packages only in apiextensions-apiserver itself, discourage use elsewhere.
- selectorRegexp: k8s[.]io/apiextensions-apiserver
allowedPrefixes:
- ''
# Forbid use of this package in other k8s.io packages.
- selectorRegexp: k8s[.]io
forbiddenPrefixes:
- ''
| staging/src/k8s.io/apiextensions-apiserver/pkg/apis/.import-restrictions | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0001685144961811602,
0.0001685144961811602,
0.0001685144961811602,
0.0001685144961811602,
0
] |
{
"id": 3,
"code_window": [
"\t\t\t\t\tEnableContentionProfiling: false,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 67
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"net"
"reflect"
"sort"
"testing"
"time"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
cpconfig "k8s.io/cloud-provider/config"
serviceconfig "k8s.io/cloud-provider/controllers/service/config"
cpoptions "k8s.io/cloud-provider/options"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/logs"
"k8s.io/component-base/metrics"
cmconfig "k8s.io/controller-manager/config"
cmoptions "k8s.io/controller-manager/options"
kubecontrollerconfig "k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
csrsigningconfig "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
cronjobconfig "k8s.io/kubernetes/pkg/controller/cronjob/config"
daemonconfig "k8s.io/kubernetes/pkg/controller/daemon/config"
deploymentconfig "k8s.io/kubernetes/pkg/controller/deployment/config"
endpointconfig "k8s.io/kubernetes/pkg/controller/endpoint/config"
endpointsliceconfig "k8s.io/kubernetes/pkg/controller/endpointslice/config"
endpointslicemirroringconfig "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config"
garbagecollectorconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config"
jobconfig "k8s.io/kubernetes/pkg/controller/job/config"
namespaceconfig "k8s.io/kubernetes/pkg/controller/namespace/config"
nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config"
nodelifecycleconfig "k8s.io/kubernetes/pkg/controller/nodelifecycle/config"
poautosclerconfig "k8s.io/kubernetes/pkg/controller/podautoscaler/config"
podgcconfig "k8s.io/kubernetes/pkg/controller/podgc/config"
replicasetconfig "k8s.io/kubernetes/pkg/controller/replicaset/config"
replicationconfig "k8s.io/kubernetes/pkg/controller/replication/config"
resourcequotaconfig "k8s.io/kubernetes/pkg/controller/resourcequota/config"
serviceaccountconfig "k8s.io/kubernetes/pkg/controller/serviceaccount/config"
statefulsetconfig "k8s.io/kubernetes/pkg/controller/statefulset/config"
ttlafterfinishedconfig "k8s.io/kubernetes/pkg/controller/ttlafterfinished/config"
attachdetachconfig "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config"
persistentvolumeconfig "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config"
)
var args = []string{
"--address=192.168.4.10",
"--allocate-node-cidrs=true",
"--attach-detach-reconcile-sync-period=30s",
"--cidr-allocator-type=CloudAllocator",
"--cloud-config=/cloud-config",
"--cloud-provider=gce",
"--cluster-cidr=1.2.3.4/24",
"--cluster-name=k8s",
"--cluster-signing-cert-file=/cluster-signing-cert",
"--cluster-signing-key-file=/cluster-signing-key",
"--cluster-signing-kubelet-serving-cert-file=/cluster-signing-kubelet-serving/cert-file",
"--cluster-signing-kubelet-serving-key-file=/cluster-signing-kubelet-serving/key-file",
"--cluster-signing-kubelet-client-cert-file=/cluster-signing-kubelet-client/cert-file",
"--cluster-signing-kubelet-client-key-file=/cluster-signing-kubelet-client/key-file",
"--cluster-signing-kube-apiserver-client-cert-file=/cluster-signing-kube-apiserver-client/cert-file",
"--cluster-signing-kube-apiserver-client-key-file=/cluster-signing-kube-apiserver-client/key-file",
"--cluster-signing-legacy-unknown-cert-file=/cluster-signing-legacy-unknown/cert-file",
"--cluster-signing-legacy-unknown-key-file=/cluster-signing-legacy-unknown/key-file",
"--concurrent-deployment-syncs=10",
"--concurrent-statefulset-syncs=15",
"--concurrent-endpoint-syncs=10",
"--concurrent-service-endpoint-syncs=10",
"--concurrent-gc-syncs=30",
"--concurrent-namespace-syncs=20",
"--concurrent-replicaset-syncs=10",
"--concurrent-resource-quota-syncs=10",
"--concurrent-service-syncs=2",
"--concurrent-serviceaccount-token-syncs=10",
"--concurrent_rc_syncs=10",
"--configure-cloud-routes=false",
"--contention-profiling=true",
"--controller-start-interval=2m",
"--controllers=foo,bar",
"--deployment-controller-sync-period=45s",
"--disable-attach-detach-reconcile-sync=true",
"--enable-dynamic-provisioning=false",
"--enable-garbage-collector=false",
"--enable-hostpath-provisioner=true",
"--enable-taint-manager=false",
"--cluster-signing-duration=10h",
"--flex-volume-plugin-dir=/flex-volume-plugin",
"--volume-host-cidr-denylist=127.0.0.1/28,feed::/16",
"--volume-host-allow-local-loopback=false",
"--horizontal-pod-autoscaler-downscale-delay=2m",
"--horizontal-pod-autoscaler-sync-period=45s",
"--horizontal-pod-autoscaler-upscale-delay=1m",
"--horizontal-pod-autoscaler-downscale-stabilization=3m",
"--horizontal-pod-autoscaler-cpu-initialization-period=90s",
"--horizontal-pod-autoscaler-initial-readiness-delay=50s",
"--http2-max-streams-per-connection=47",
"--kube-api-burst=100",
"--kube-api-content-type=application/json",
"--kube-api-qps=50.0",
"--kubeconfig=/kubeconfig",
"--large-cluster-size-threshold=100",
"--leader-elect=false",
"--leader-elect-lease-duration=30s",
"--leader-elect-renew-deadline=15s",
"--leader-elect-resource-lock=configmap",
"--leader-elect-retry-period=5s",
"--master=192.168.4.20",
"--max-endpoints-per-slice=200",
"--min-resync-period=8h",
"--mirroring-concurrent-service-endpoint-syncs=2",
"--mirroring-max-endpoints-per-subset=1000",
"--namespace-sync-period=10m",
"--node-cidr-mask-size=48",
"--node-cidr-mask-size-ipv4=48",
"--node-cidr-mask-size-ipv6=108",
"--node-eviction-rate=0.2",
"--node-monitor-grace-period=30s",
"--node-monitor-period=10s",
"--node-startup-grace-period=30s",
"--pod-eviction-timeout=2m",
"--port=10000",
"--profiling=false",
"--pv-recycler-increment-timeout-nfs=45",
"--pv-recycler-minimum-timeout-hostpath=45",
"--pv-recycler-minimum-timeout-nfs=200",
"--pv-recycler-timeout-increment-hostpath=45",
"--pvclaimbinder-sync-period=30s",
"--resource-quota-sync-period=10m",
"--route-reconciliation-period=30s",
"--secondary-node-eviction-rate=0.05",
"--service-account-private-key-file=/service-account-private-key",
"--terminated-pod-gc-threshold=12000",
"--unhealthy-zone-threshold=0.6",
"--use-service-account-credentials=true",
"--cert-dir=/a/b/c",
"--bind-address=192.168.4.21",
"--secure-port=10001",
"--concurrent-ttl-after-finished-syncs=8",
}
func TestAddFlags(t *testing.T) {
fs := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
s, _ := NewKubeControllerManagerOptions()
for _, f := range s.Flags([]string{""}, []string{""}).FlagSets {
fs.AddFlagSet(f)
}
fs.Parse(args)
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(s.GarbageCollectorController.GCIgnoredResources))
expected := &KubeControllerManagerOptions{
Generic: &cmoptions.GenericControllerManagerConfigurationOptions{
GenericControllerManagerConfiguration: &cmconfig.GenericControllerManagerConfiguration{
Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour},
ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
ContentType: "application/json",
QPS: 50.0,
Burst: 100,
},
ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute},
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
ResourceLock: "configmap",
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 30 * time.Second},
RenewDeadline: metav1.Duration{Duration: 15 * time.Second},
RetryPeriod: metav1.Duration{Duration: 5 * time.Second},
ResourceName: "kube-controller-manager",
ResourceNamespace: "kube-system",
},
Controllers: []string{"foo", "bar"},
},
Debugging: &cmoptions.DebuggingOptions{
DebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{
EnableProfiling: false,
EnableContentionProfiling: true,
},
},
},
KubeCloudShared: &cpoptions.KubeCloudSharedOptions{
KubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{
UseServiceAccountCredentials: true,
RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},
NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterName: "k8s",
ClusterCIDR: "1.2.3.4/24",
AllocateNodeCIDRs: true,
CIDRAllocatorType: "CloudAllocator",
ConfigureCloudRoutes: false,
},
CloudProvider: &cpoptions.CloudProviderOptions{
CloudProviderConfiguration: &cpconfig.CloudProviderConfiguration{
Name: "gce",
CloudConfigFile: "/cloud-config",
},
},
},
ServiceController: &cpoptions.ServiceControllerOptions{
ServiceControllerConfiguration: &serviceconfig.ServiceControllerConfiguration{
ConcurrentServiceSyncs: 2,
},
},
AttachDetachController: &AttachDetachControllerOptions{
&attachdetachconfig.AttachDetachControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second},
DisableAttachDetachReconcilerSync: true,
},
},
CSRSigningController: &CSRSigningControllerOptions{
&csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
},
},
DaemonSetController: &DaemonSetControllerOptions{
&daemonconfig.DaemonSetControllerConfiguration{
ConcurrentDaemonSetSyncs: 2,
},
},
DeploymentController: &DeploymentControllerOptions{
&deploymentconfig.DeploymentControllerConfiguration{
ConcurrentDeploymentSyncs: 10,
DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
},
},
StatefulSetController: &StatefulSetControllerOptions{
&statefulsetconfig.StatefulSetControllerConfiguration{
ConcurrentStatefulSetSyncs: 15,
},
},
DeprecatedFlags: &DeprecatedControllerOptions{
&kubectrlmgrconfig.DeprecatedControllerConfiguration{
DeletingPodsQPS: 0.1,
RegisterRetryCount: 10,
},
},
EndpointController: &EndpointControllerOptions{
&endpointconfig.EndpointControllerConfiguration{
ConcurrentEndpointSyncs: 10,
},
},
EndpointSliceController: &EndpointSliceControllerOptions{
&endpointsliceconfig.EndpointSliceControllerConfiguration{
ConcurrentServiceEndpointSyncs: 10,
MaxEndpointsPerSlice: 200,
},
},
EndpointSliceMirroringController: &EndpointSliceMirroringControllerOptions{
&endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration{
MirroringConcurrentServiceEndpointSyncs: 2,
MirroringMaxEndpointsPerSubset: 1000,
},
},
GarbageCollectorController: &GarbageCollectorControllerOptions{
&garbagecollectorconfig.GarbageCollectorControllerConfiguration{
ConcurrentGCSyncs: 30,
GCIgnoredResources: []garbagecollectorconfig.GroupResource{
{Group: "", Resource: "events"},
},
EnableGarbageCollector: false,
},
},
HPAController: &HPAControllerOptions{
&poautosclerconfig.HPAControllerConfiguration{
HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute},
HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute},
HorizontalPodAutoscalerDownscaleStabilizationWindow: metav1.Duration{Duration: 3 * time.Minute},
HorizontalPodAutoscalerCPUInitializationPeriod: metav1.Duration{Duration: 90 * time.Second},
HorizontalPodAutoscalerInitialReadinessDelay: metav1.Duration{Duration: 50 * time.Second},
HorizontalPodAutoscalerTolerance: 0.1,
HorizontalPodAutoscalerUseRESTClients: true,
},
},
JobController: &JobControllerOptions{
&jobconfig.JobControllerConfiguration{
ConcurrentJobSyncs: 5,
},
},
CronJobController: &CronJobControllerOptions{
&cronjobconfig.CronJobControllerConfiguration{
ConcurrentCronJobSyncs: 5,
},
},
NamespaceController: &NamespaceControllerOptions{
&namespaceconfig.NamespaceControllerConfiguration{
NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentNamespaceSyncs: 20,
},
},
NodeIPAMController: &NodeIPAMControllerOptions{
&nodeipamconfig.NodeIPAMControllerConfiguration{
NodeCIDRMaskSize: 48,
NodeCIDRMaskSizeIPv4: 48,
NodeCIDRMaskSizeIPv6: 108,
},
},
NodeLifecycleController: &NodeLifecycleControllerOptions{
&nodelifecycleconfig.NodeLifecycleControllerConfiguration{
EnableTaintManager: false,
NodeEvictionRate: 0.2,
SecondaryNodeEvictionRate: 0.05,
NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},
NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second},
PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute},
LargeClusterSizeThreshold: 100,
UnhealthyZoneThreshold: 0.6,
},
},
PersistentVolumeBinderController: &PersistentVolumeBinderControllerOptions{
&persistentvolumeconfig.PersistentVolumeBinderControllerConfiguration{
PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
VolumeConfiguration: persistentvolumeconfig.VolumeConfiguration{
EnableDynamicProvisioning: false,
EnableHostPathProvisioning: true,
FlexVolumePluginDir: "/flex-volume-plugin",
PersistentVolumeRecyclerConfiguration: persistentvolumeconfig.PersistentVolumeRecyclerConfiguration{
MaximumRetry: 3,
MinimumTimeoutNFS: 200,
IncrementTimeoutNFS: 45,
MinimumTimeoutHostPath: 45,
IncrementTimeoutHostPath: 45,
},
},
VolumeHostCIDRDenylist: []string{"127.0.0.1/28", "feed::/16"},
VolumeHostAllowLocalLoopback: false,
},
},
PodGCController: &PodGCControllerOptions{
&podgcconfig.PodGCControllerConfiguration{
TerminatedPodGCThreshold: 12000,
},
},
ReplicaSetController: &ReplicaSetControllerOptions{
&replicasetconfig.ReplicaSetControllerConfiguration{
ConcurrentRSSyncs: 10,
},
},
ReplicationController: &ReplicationControllerOptions{
&replicationconfig.ReplicationControllerConfiguration{
ConcurrentRCSyncs: 10,
},
},
ResourceQuotaController: &ResourceQuotaControllerOptions{
&resourcequotaconfig.ResourceQuotaControllerConfiguration{
ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentResourceQuotaSyncs: 10,
},
},
SAController: &SAControllerOptions{
&serviceaccountconfig.SAControllerConfiguration{
ServiceAccountKeyFile: "/service-account-private-key",
ConcurrentSATokenSyncs: 10,
},
},
TTLAfterFinishedController: &TTLAfterFinishedControllerOptions{
&ttlafterfinishedconfig.TTLAfterFinishedControllerConfiguration{
ConcurrentTTLSyncs: 8,
},
},
SecureServing: (&apiserveroptions.SecureServingOptions{
BindPort: 10001,
BindAddress: net.ParseIP("192.168.4.21"),
ServerCert: apiserveroptions.GeneratableKeyCert{
CertDirectory: "/a/b/c",
PairName: "kube-controller-manager",
},
HTTP2MaxStreamsPerConnection: 47,
}).WithLoopback(),
InsecureServing: (&apiserveroptions.DeprecatedInsecureServingOptions{
BindAddress: net.ParseIP("192.168.4.10"),
BindPort: int(10000),
BindNetwork: "tcp",
}).WithLoopback(),
Authentication: &apiserveroptions.DelegatingAuthenticationOptions{
CacheTTL: 10 * time.Second,
ClientTimeout: 10 * time.Second,
WebhookRetryBackoff: apiserveroptions.DefaultAuthWebhookRetryBackoff(),
ClientCert: apiserveroptions.ClientCertAuthenticationOptions{},
RequestHeader: apiserveroptions.RequestHeaderAuthenticationOptions{
UsernameHeaders: []string{"x-remote-user"},
GroupHeaders: []string{"x-remote-group"},
ExtraHeaderPrefixes: []string{"x-remote-extra-"},
},
RemoteKubeConfigFileOptional: true,
},
Authorization: &apiserveroptions.DelegatingAuthorizationOptions{
AllowCacheTTL: 10 * time.Second,
DenyCacheTTL: 10 * time.Second,
ClientTimeout: 10 * time.Second,
WebhookRetryBackoff: apiserveroptions.DefaultAuthWebhookRetryBackoff(),
RemoteKubeConfigFileOptional: true,
AlwaysAllowPaths: []string{"/healthz", "/readyz", "/livez"}, // note: this does not match /healthz/ or /healthz/*
AlwaysAllowGroups: []string{"system:masters"},
},
Kubeconfig: "/kubeconfig",
Master: "192.168.4.20",
Metrics: &metrics.Options{},
Logs: logs.NewOptions(),
}
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(expected.GarbageCollectorController.GCIgnoredResources))
if !reflect.DeepEqual(expected, s) {
t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected, s))
}
}
func TestApplyTo(t *testing.T) {
fs := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
s, _ := NewKubeControllerManagerOptions()
// flag set to parse the args that are required to start the kube controller manager
for _, f := range s.Flags([]string{""}, []string{""}).FlagSets {
fs.AddFlagSet(f)
}
fs.Parse(args)
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(s.GarbageCollectorController.GCIgnoredResources))
expected := &kubecontrollerconfig.Config{
ComponentConfig: kubectrlmgrconfig.KubeControllerManagerConfiguration{
Generic: cmconfig.GenericControllerManagerConfiguration{
Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour},
ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
ContentType: "application/json",
QPS: 50.0,
Burst: 100,
},
ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute},
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
ResourceLock: "configmap",
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 30 * time.Second},
RenewDeadline: metav1.Duration{Duration: 15 * time.Second},
RetryPeriod: metav1.Duration{Duration: 5 * time.Second},
ResourceName: "kube-controller-manager",
ResourceNamespace: "kube-system",
},
Controllers: []string{"foo", "bar"},
Debugging: componentbaseconfig.DebuggingConfiguration{
EnableProfiling: false,
EnableContentionProfiling: true,
},
},
KubeCloudShared: cpconfig.KubeCloudSharedConfiguration{
UseServiceAccountCredentials: true,
RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},
NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterName: "k8s",
ClusterCIDR: "1.2.3.4/24",
AllocateNodeCIDRs: true,
CIDRAllocatorType: "CloudAllocator",
ConfigureCloudRoutes: false,
CloudProvider: cpconfig.CloudProviderConfiguration{
Name: "gce",
CloudConfigFile: "/cloud-config",
},
},
ServiceController: serviceconfig.ServiceControllerConfiguration{
ConcurrentServiceSyncs: 2,
},
AttachDetachController: attachdetachconfig.AttachDetachControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second},
DisableAttachDetachReconcilerSync: true,
},
CSRSigningController: csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
},
DaemonSetController: daemonconfig.DaemonSetControllerConfiguration{
ConcurrentDaemonSetSyncs: 2,
},
DeploymentController: deploymentconfig.DeploymentControllerConfiguration{
ConcurrentDeploymentSyncs: 10,
DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
},
StatefulSetController: statefulsetconfig.StatefulSetControllerConfiguration{
ConcurrentStatefulSetSyncs: 15,
},
DeprecatedController: kubectrlmgrconfig.DeprecatedControllerConfiguration{
DeletingPodsQPS: 0.1,
RegisterRetryCount: 10,
},
EndpointController: endpointconfig.EndpointControllerConfiguration{
ConcurrentEndpointSyncs: 10,
},
EndpointSliceController: endpointsliceconfig.EndpointSliceControllerConfiguration{
ConcurrentServiceEndpointSyncs: 10,
MaxEndpointsPerSlice: 200,
},
EndpointSliceMirroringController: endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration{
MirroringConcurrentServiceEndpointSyncs: 2,
MirroringMaxEndpointsPerSubset: 1000,
},
GarbageCollectorController: garbagecollectorconfig.GarbageCollectorControllerConfiguration{
ConcurrentGCSyncs: 30,
GCIgnoredResources: []garbagecollectorconfig.GroupResource{
{Group: "", Resource: "events"},
},
EnableGarbageCollector: false,
},
HPAController: poautosclerconfig.HPAControllerConfiguration{
HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute},
HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute},
HorizontalPodAutoscalerDownscaleStabilizationWindow: metav1.Duration{Duration: 3 * time.Minute},
HorizontalPodAutoscalerCPUInitializationPeriod: metav1.Duration{Duration: 90 * time.Second},
HorizontalPodAutoscalerInitialReadinessDelay: metav1.Duration{Duration: 50 * time.Second},
HorizontalPodAutoscalerTolerance: 0.1,
HorizontalPodAutoscalerUseRESTClients: true,
},
JobController: jobconfig.JobControllerConfiguration{
ConcurrentJobSyncs: 5,
},
CronJobController: cronjobconfig.CronJobControllerConfiguration{
ConcurrentCronJobSyncs: 5,
},
NamespaceController: namespaceconfig.NamespaceControllerConfiguration{
NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentNamespaceSyncs: 20,
},
NodeIPAMController: nodeipamconfig.NodeIPAMControllerConfiguration{
NodeCIDRMaskSize: 48,
NodeCIDRMaskSizeIPv4: 48,
NodeCIDRMaskSizeIPv6: 108,
},
NodeLifecycleController: nodelifecycleconfig.NodeLifecycleControllerConfiguration{
EnableTaintManager: false,
NodeEvictionRate: 0.2,
SecondaryNodeEvictionRate: 0.05,
NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},
NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second},
PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute},
LargeClusterSizeThreshold: 100,
UnhealthyZoneThreshold: 0.6,
},
PersistentVolumeBinderController: persistentvolumeconfig.PersistentVolumeBinderControllerConfiguration{
PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
VolumeConfiguration: persistentvolumeconfig.VolumeConfiguration{
EnableDynamicProvisioning: false,
EnableHostPathProvisioning: true,
FlexVolumePluginDir: "/flex-volume-plugin",
PersistentVolumeRecyclerConfiguration: persistentvolumeconfig.PersistentVolumeRecyclerConfiguration{
MaximumRetry: 3,
MinimumTimeoutNFS: 200,
IncrementTimeoutNFS: 45,
MinimumTimeoutHostPath: 45,
IncrementTimeoutHostPath: 45,
},
},
VolumeHostCIDRDenylist: []string{"127.0.0.1/28", "feed::/16"},
VolumeHostAllowLocalLoopback: false,
},
PodGCController: podgcconfig.PodGCControllerConfiguration{
TerminatedPodGCThreshold: 12000,
},
ReplicaSetController: replicasetconfig.ReplicaSetControllerConfiguration{
ConcurrentRSSyncs: 10,
},
ReplicationController: replicationconfig.ReplicationControllerConfiguration{
ConcurrentRCSyncs: 10,
},
ResourceQuotaController: resourcequotaconfig.ResourceQuotaControllerConfiguration{
ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentResourceQuotaSyncs: 10,
},
SAController: serviceaccountconfig.SAControllerConfiguration{
ServiceAccountKeyFile: "/service-account-private-key",
ConcurrentSATokenSyncs: 10,
},
TTLAfterFinishedController: ttlafterfinishedconfig.TTLAfterFinishedControllerConfiguration{
ConcurrentTTLSyncs: 8,
},
},
}
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(expected.ComponentConfig.GarbageCollectorController.GCIgnoredResources))
c := &kubecontrollerconfig.Config{}
s.ApplyTo(c)
if !reflect.DeepEqual(expected.ComponentConfig, c.ComponentConfig) {
t.Errorf("Got different configuration than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected.ComponentConfig, c.ComponentConfig))
}
}
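// sortedGCIgnoredResources implements sort.Interface so that GCIgnoredResources, which is
// built from a map with random iteration order, can be sorted deterministically before the
// DeepEqual comparison above.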
type sortedGCIgnoredResources []garbagecollectorconfig.GroupResource
func (r sortedGCIgnoredResources) Len() int {
return len(r)
}
func (r sortedGCIgnoredResources) Less(i, j int) bool {
if r[i].Group < r[j].Group {
return true
} else if r[i].Group > r[j].Group {
return false
}
return r[i].Resource < r[j].Resource
}
func (r sortedGCIgnoredResources) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
| cmd/kube-controller-manager/app/options/options_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.02188277803361416,
0.0006278840592131019,
0.00016440915351267904,
0.0001697954721748829,
0.0026942575350403786
] |
{
"id": 3,
"code_window": [
"\t\t\t\t\tEnableContentionProfiling: false,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 67
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package copycerts
import (
"context"
"encoding/hex"
"io/ioutil"
"os"
"path"
"regexp"
"testing"
"github.com/lithammer/dedent"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
fakeclient "k8s.io/client-go/kubernetes/fake"
certutil "k8s.io/client-go/util/cert"
keyutil "k8s.io/client-go/util/keyutil"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
cryptoutil "k8s.io/kubernetes/cmd/kubeadm/app/util/crypto"
testutil "k8s.io/kubernetes/cmd/kubeadm/test"
)
func TestGetDataFromInitConfig(t *testing.T) {
certData := []byte("cert-data")
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
cfg := &kubeadmapi.InitConfiguration{}
cfg.CertificatesDir = tmpdir
key, err := CreateCertificateKey()
if err != nil {
t.Fatalf(dedent.Dedent("failed to create key.\nfatal error: %v"), err)
}
decodedKey, err := hex.DecodeString(key)
if err != nil {
t.Fatalf(dedent.Dedent("failed to decode key.\nfatal error: %v"), err)
}
if err := os.Mkdir(path.Join(tmpdir, "etcd"), 0755); err != nil {
t.Fatalf(dedent.Dedent("failed to create etcd cert dir.\nfatal error: %v"), err)
}
certs := certsToTransfer(cfg)
for name, path := range certs {
if err := ioutil.WriteFile(path, certData, 0644); err != nil {
t.Fatalf(dedent.Dedent("failed to write cert: %s\nfatal error: %v"), name, err)
}
}
secretData, err := getDataFromDisk(cfg, decodedKey)
if err != nil {
t.Fatalf("failed to get secret data. fatal error: %v", err)
}
re := regexp.MustCompile(`[-._a-zA-Z0-9]+`)
for name, data := range secretData {
if !re.MatchString(name) {
t.Fatalf(dedent.Dedent("failed to validate secretData\n %s isn't a valid secret key"), name)
}
decryptedData, err := cryptoutil.DecryptBytes(data, decodedKey)
if string(certData) != string(decryptedData) {
t.Fatalf(dedent.Dedent("can't decrypt cert: %s\nfatal error: %v"), name, err)
}
}
}
func TestCertsToTransfer(t *testing.T) {
localEtcdCfg := &kubeadmapi.InitConfiguration{}
externalEtcdCfg := &kubeadmapi.InitConfiguration{}
externalEtcdCfg.Etcd = kubeadmapi.Etcd{}
externalEtcdCfg.Etcd.External = &kubeadmapi.ExternalEtcd{}
commonExpectedCerts := []string{
kubeadmconstants.CACertName,
kubeadmconstants.CAKeyName,
kubeadmconstants.FrontProxyCACertName,
kubeadmconstants.FrontProxyCAKeyName,
kubeadmconstants.ServiceAccountPublicKeyName,
kubeadmconstants.ServiceAccountPrivateKeyName,
}
tests := map[string]struct {
config *kubeadmapi.InitConfiguration
expectedCerts []string
}{
"local etcd": {
config: localEtcdCfg,
expectedCerts: append(
[]string{kubeadmconstants.EtcdCACertName, kubeadmconstants.EtcdCAKeyName},
commonExpectedCerts...,
),
},
"external etcd": {
config: externalEtcdCfg,
expectedCerts: append(
[]string{externalEtcdCA, externalEtcdCert, externalEtcdKey},
commonExpectedCerts...,
),
},
}
for name, test := range tests {
t.Run(name, func(t2 *testing.T) {
certList := certsToTransfer(test.config)
for _, cert := range test.expectedCerts {
if _, found := certList[cert]; !found {
t2.Fatalf(dedent.Dedent("failed to get list of certs to upload\ncert %s not found"), cert)
}
}
})
}
}
func TestCertOrKeyNameToSecretName(t *testing.T) {
tests := []struct {
keyName string
expectedSecretName string
}{
{
keyName: "apiserver-kubelet-client.crt",
expectedSecretName: "apiserver-kubelet-client.crt",
},
{
keyName: "etcd/ca.crt",
expectedSecretName: "etcd-ca.crt",
},
{
keyName: "etcd/healthcheck-client.crt",
expectedSecretName: "etcd-healthcheck-client.crt",
},
}
for _, tc := range tests {
secretName := certOrKeyNameToSecretName(tc.keyName)
if secretName != tc.expectedSecretName {
t.Fatalf("secret name %s didn't match expected name %s", secretName, tc.expectedSecretName)
}
}
}
func TestUploadCerts(t *testing.T) {
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
secretKey, err := CreateCertificateKey()
if err != nil {
t.Fatalf("could not create certificate key: %v", err)
}
initConfiguration := testutil.GetDefaultInternalConfig(t)
initConfiguration.ClusterConfiguration.CertificatesDir = tmpdir
if err := certs.CreatePKIAssets(initConfiguration); err != nil {
t.Fatalf("error creating PKI assets: %v", err)
}
cs := fakeclient.NewSimpleClientset()
if err := UploadCerts(cs, initConfiguration, secretKey); err != nil {
t.Fatalf("error uploading certs: %v", err)
}
rawSecretKey, err := hex.DecodeString(secretKey)
if err != nil {
t.Fatalf("error decoding key: %v", err)
}
secretMap, err := cs.CoreV1().Secrets(metav1.NamespaceSystem).Get(context.TODO(), kubeadmconstants.KubeadmCertsSecret, metav1.GetOptions{})
if err != nil {
t.Fatalf("could not fetch secret: %v", err)
}
for certName, certPath := range certsToTransfer(initConfiguration) {
secretCertData, err := cryptoutil.DecryptBytes(secretMap.Data[certOrKeyNameToSecretName(certName)], rawSecretKey)
if err != nil {
t.Fatalf("error decrypting secret data: %v", err)
}
diskCertData, err := ioutil.ReadFile(certPath)
if err != nil {
t.Fatalf("error reading certificate from disk: %v", err)
}
// Check that the encrypted contents on the secret match the contents on disk, and that all
// the expected certificates are in the secret
if string(secretCertData) != string(diskCertData) {
t.Fatalf("cert %s does not have the expected contents. contents: %q; expected contents: %q", certName, string(secretCertData), string(diskCertData))
}
}
}
func TestDownloadCerts(t *testing.T) {
secretKey, err := CreateCertificateKey()
if err != nil {
t.Fatalf("could not create certificate key: %v", err)
}
// Temporary directory where certificates will be generated
tmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(tmpdir)
initConfiguration := testutil.GetDefaultInternalConfig(t)
initConfiguration.ClusterConfiguration.CertificatesDir = tmpdir
// Temporary directory where certificates will be downloaded to
targetTmpdir := testutil.SetupTempDir(t)
defer os.RemoveAll(targetTmpdir)
initForDownloadConfiguration := testutil.GetDefaultInternalConfig(t)
initForDownloadConfiguration.ClusterConfiguration.CertificatesDir = targetTmpdir
if err := certs.CreatePKIAssets(initConfiguration); err != nil {
t.Fatalf("error creating PKI assets: %v", err)
}
kubeadmCertsSecret := createKubeadmCertsSecret(t, initConfiguration, secretKey)
cs := fakeclient.NewSimpleClientset(kubeadmCertsSecret)
if err := DownloadCerts(cs, initForDownloadConfiguration, secretKey); err != nil {
t.Fatalf("error downloading certs: %v", err)
}
const keyFileMode = 0600
const certFileMode = 0644
for certName, certPath := range certsToTransfer(initForDownloadConfiguration) {
diskCertData, err := ioutil.ReadFile(certPath)
if err != nil {
t.Errorf("error reading certificate from disk: %v", err)
}
// Check that the written files are either certificates or keys, and that they have
// the expected permissions
if _, err := keyutil.ParsePublicKeysPEM(diskCertData); err == nil {
if stat, err := os.Stat(certPath); err == nil {
if stat.Mode() != keyFileMode {
t.Errorf("key %q should have mode %#o, has %#o", certName, keyFileMode, stat.Mode())
}
} else {
t.Errorf("could not stat key %q: %v", certName, err)
}
} else if _, err := certutil.ParseCertsPEM(diskCertData); err == nil {
if stat, err := os.Stat(certPath); err == nil {
if stat.Mode() != certFileMode {
t.Errorf("cert %q should have mode %#o, has %#o", certName, certFileMode, stat.Mode())
}
} else {
t.Errorf("could not stat cert %q: %v", certName, err)
}
} else {
t.Errorf("secret %q was not identified as a cert or as a key", certName)
}
}
}
func createKubeadmCertsSecret(t *testing.T, cfg *kubeadmapi.InitConfiguration, secretKey string) *v1.Secret {
decodedKey, err := hex.DecodeString(secretKey)
if err != nil {
t.Fatalf("error decoding key: %v", err)
}
secretData, err := getDataFromDisk(cfg, decodedKey)
if err != nil {
t.Fatalf("error creating secret data: %v", err)
}
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmconstants.KubeadmCertsSecret,
Namespace: metav1.NamespaceSystem,
},
Data: secretData,
}
}
| cmd/kubeadm/app/phases/copycerts/copycerts_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00026473411708138883,
0.0001754497061483562,
0.00016591802705079317,
0.00017105534789152443,
0.00001775756936694961
] |
{
"id": 3,
"code_window": [
"\t\t\t\t\tEnableContentionProfiling: false,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 67
} | /*
Ginkgo accepts a number of configuration options.
These are documented [here](http://onsi.github.io/ginkgo/#the_ginkgo_cli)
You can also learn more via
ginkgo help
or (I kid you not):
go test -asdf
*/
package config
import (
"flag"
"time"
"fmt"
)
const VERSION = "1.11.0"
type GinkgoConfigType struct {
RandomSeed int64
RandomizeAllSpecs bool
RegexScansFilePath bool
FocusString string
SkipString string
SkipMeasurements bool
FailOnPending bool
FailFast bool
FlakeAttempts int
EmitSpecProgress bool
DryRun bool
DebugParallel bool
ParallelNode int
ParallelTotal int
SyncHost string
StreamHost string
}
var GinkgoConfig = GinkgoConfigType{}
type DefaultReporterConfigType struct {
NoColor bool
SlowSpecThreshold float64
NoisyPendings bool
NoisySkippings bool
Succinct bool
Verbose bool
FullTrace bool
ReportPassed bool
ReportFile string
}
var DefaultReporterConfig = DefaultReporterConfigType{}
func processPrefix(prefix string) string {
if prefix != "" {
prefix += "."
}
return prefix
}
func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) {
prefix = processPrefix(prefix)
flagSet.Int64Var(&(GinkgoConfig.RandomSeed), prefix+"seed", time.Now().Unix(), "The seed used to randomize the spec suite.")
flagSet.BoolVar(&(GinkgoConfig.RandomizeAllSpecs), prefix+"randomizeAllSpecs", false, "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When groups.")
flagSet.BoolVar(&(GinkgoConfig.SkipMeasurements), prefix+"skipMeasurements", false, "If set, ginkgo will skip any measurement specs.")
flagSet.BoolVar(&(GinkgoConfig.FailOnPending), prefix+"failOnPending", false, "If set, ginkgo will mark the test suite as failed if any specs are pending.")
flagSet.BoolVar(&(GinkgoConfig.FailFast), prefix+"failFast", false, "If set, ginkgo will stop running a test suite after a failure occurs.")
flagSet.BoolVar(&(GinkgoConfig.DryRun), prefix+"dryRun", false, "If set, ginkgo will walk the test hierarchy without actually running anything. Best paired with -v.")
flagSet.StringVar(&(GinkgoConfig.FocusString), prefix+"focus", "", "If set, ginkgo will only run specs that match this regular expression.")
flagSet.StringVar(&(GinkgoConfig.SkipString), prefix+"skip", "", "If set, ginkgo will only run specs that do not match this regular expression.")
flagSet.BoolVar(&(GinkgoConfig.RegexScansFilePath), prefix+"regexScansFilePath", false, "If set, ginkgo regex matching also will look at the file path (code location).")
flagSet.IntVar(&(GinkgoConfig.FlakeAttempts), prefix+"flakeAttempts", 1, "Make up to this many attempts to run each spec. Please note that if any of the attempts succeed, the suite will not be failed. But any failures will still be recorded.")
flagSet.BoolVar(&(GinkgoConfig.EmitSpecProgress), prefix+"progress", false, "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter.")
flagSet.BoolVar(&(GinkgoConfig.DebugParallel), prefix+"debug", false, "If set, ginkgo will emit node output to files when running in parallel.")
if includeParallelFlags {
flagSet.IntVar(&(GinkgoConfig.ParallelNode), prefix+"parallel.node", 1, "This worker node's (one-indexed) node number. For running specs in parallel.")
flagSet.IntVar(&(GinkgoConfig.ParallelTotal), prefix+"parallel.total", 1, "The total number of worker nodes. For running specs in parallel.")
flagSet.StringVar(&(GinkgoConfig.SyncHost), prefix+"parallel.synchost", "", "The address for the server that will synchronize the running nodes.")
flagSet.StringVar(&(GinkgoConfig.StreamHost), prefix+"parallel.streamhost", "", "The address for the server that the running nodes should stream data to.")
}
flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.")
flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.")
flagSet.BoolVar(&(DefaultReporterConfig.NoisySkippings), prefix+"noisySkippings", true, "If set, default reporter will shout about skipping tests.")
flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter prints out all specs as they begin.")
flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report")
flagSet.BoolVar(&(DefaultReporterConfig.FullTrace), prefix+"trace", false, "If set, default reporter prints out the full stack trace when a failure occurs")
flagSet.BoolVar(&(DefaultReporterConfig.ReportPassed), prefix+"reportPassed", false, "If set, default reporter prints out captured output of passed tests.")
flagSet.StringVar(&(DefaultReporterConfig.ReportFile), prefix+"reportFile", "", "Override the default reporter output file path.")
}
func BuildFlagArgs(prefix string, ginkgo GinkgoConfigType, reporter DefaultReporterConfigType) []string {
prefix = processPrefix(prefix)
result := make([]string, 0)
if ginkgo.RandomSeed > 0 {
result = append(result, fmt.Sprintf("--%sseed=%d", prefix, ginkgo.RandomSeed))
}
if ginkgo.RandomizeAllSpecs {
result = append(result, fmt.Sprintf("--%srandomizeAllSpecs", prefix))
}
if ginkgo.SkipMeasurements {
result = append(result, fmt.Sprintf("--%sskipMeasurements", prefix))
}
if ginkgo.FailOnPending {
result = append(result, fmt.Sprintf("--%sfailOnPending", prefix))
}
if ginkgo.FailFast {
result = append(result, fmt.Sprintf("--%sfailFast", prefix))
}
if ginkgo.DryRun {
result = append(result, fmt.Sprintf("--%sdryRun", prefix))
}
if ginkgo.FocusString != "" {
result = append(result, fmt.Sprintf("--%sfocus=%s", prefix, ginkgo.FocusString))
}
if ginkgo.SkipString != "" {
result = append(result, fmt.Sprintf("--%sskip=%s", prefix, ginkgo.SkipString))
}
if ginkgo.FlakeAttempts > 1 {
result = append(result, fmt.Sprintf("--%sflakeAttempts=%d", prefix, ginkgo.FlakeAttempts))
}
if ginkgo.EmitSpecProgress {
result = append(result, fmt.Sprintf("--%sprogress", prefix))
}
if ginkgo.DebugParallel {
result = append(result, fmt.Sprintf("--%sdebug", prefix))
}
if ginkgo.ParallelNode != 0 {
result = append(result, fmt.Sprintf("--%sparallel.node=%d", prefix, ginkgo.ParallelNode))
}
if ginkgo.ParallelTotal != 0 {
result = append(result, fmt.Sprintf("--%sparallel.total=%d", prefix, ginkgo.ParallelTotal))
}
if ginkgo.StreamHost != "" {
result = append(result, fmt.Sprintf("--%sparallel.streamhost=%s", prefix, ginkgo.StreamHost))
}
if ginkgo.SyncHost != "" {
result = append(result, fmt.Sprintf("--%sparallel.synchost=%s", prefix, ginkgo.SyncHost))
}
if ginkgo.RegexScansFilePath {
result = append(result, fmt.Sprintf("--%sregexScansFilePath", prefix))
}
if reporter.NoColor {
result = append(result, fmt.Sprintf("--%snoColor", prefix))
}
if reporter.SlowSpecThreshold > 0 {
result = append(result, fmt.Sprintf("--%sslowSpecThreshold=%.5f", prefix, reporter.SlowSpecThreshold))
}
if !reporter.NoisyPendings {
result = append(result, fmt.Sprintf("--%snoisyPendings=false", prefix))
}
if !reporter.NoisySkippings {
result = append(result, fmt.Sprintf("--%snoisySkippings=false", prefix))
}
if reporter.Verbose {
result = append(result, fmt.Sprintf("--%sv", prefix))
}
if reporter.Succinct {
result = append(result, fmt.Sprintf("--%ssuccinct", prefix))
}
if reporter.FullTrace {
result = append(result, fmt.Sprintf("--%strace", prefix))
}
if reporter.ReportPassed {
result = append(result, fmt.Sprintf("--%sreportPassed", prefix))
}
if reporter.ReportFile != "" {
result = append(result, fmt.Sprintf("--%sreportFile=%s", prefix, reporter.ReportFile))
}
return result
}
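// Illustrative example (not part of the original file): with prefix "ginkgo",
// BuildFlagArgs("ginkgo",
//	GinkgoConfigType{FailFast: true, ParallelNode: 1, ParallelTotal: 1},
//	DefaultReporterConfigType{NoisyPendings: true, NoisySkippings: true})
// yields ["--ginkgo.failFast", "--ginkgo.parallel.node=1", "--ginkgo.parallel.total=1"]:
// zero-valued fields such as RandomSeed and SlowSpecThreshold are skipped, and the
// noisyPendings/noisySkippings flags are only emitted when those fields are false.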
| vendor/github.com/onsi/ginkgo/config/config.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00018936464039143175,
0.00017398329509887844,
0.00016581079398747534,
0.0001736658305162564,
0.000006075204055377981
] |
{
"id": 3,
"code_window": [
"\t\t\t\t\tEnableContentionProfiling: false,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 67
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package embed
import (
"context"
"fmt"
"io/ioutil"
defaultLog "log"
"net"
"net/http"
"strings"
"go.etcd.io/etcd/clientv3/credentials"
"go.etcd.io/etcd/etcdserver"
"go.etcd.io/etcd/etcdserver/api/v3client"
"go.etcd.io/etcd/etcdserver/api/v3election"
"go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb"
v3electiongw "go.etcd.io/etcd/etcdserver/api/v3election/v3electionpb/gw"
"go.etcd.io/etcd/etcdserver/api/v3lock"
"go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb"
v3lockgw "go.etcd.io/etcd/etcdserver/api/v3lock/v3lockpb/gw"
"go.etcd.io/etcd/etcdserver/api/v3rpc"
etcdservergw "go.etcd.io/etcd/etcdserver/etcdserverpb/gw"
"go.etcd.io/etcd/pkg/debugutil"
"go.etcd.io/etcd/pkg/httputil"
"go.etcd.io/etcd/pkg/transport"
gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/soheilhy/cmux"
"github.com/tmc/grpc-websocket-proxy/wsproxy"
"go.uber.org/zap"
"golang.org/x/net/trace"
"google.golang.org/grpc"
)
type serveCtx struct {
lg *zap.Logger
l net.Listener
addr string
network string
secure bool
insecure bool
ctx context.Context
cancel context.CancelFunc
userHandlers map[string]http.Handler
serviceRegister func(*grpc.Server)
serversC chan *servers
}
type servers struct {
secure bool
grpc *grpc.Server
http *http.Server
}
func newServeCtx(lg *zap.Logger) *serveCtx {
ctx, cancel := context.WithCancel(context.Background())
return &serveCtx{
lg: lg,
ctx: ctx,
cancel: cancel,
userHandlers: make(map[string]http.Handler),
serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true
}
}
// serve accepts incoming connections on the listener l,
// creating a new service goroutine for each. The service goroutines
// read requests and then call handler to reply to them.
func (sctx *serveCtx) serve(
s *etcdserver.EtcdServer,
tlsinfo *transport.TLSInfo,
handler http.Handler,
errHandler func(error),
gopts ...grpc.ServerOption) (err error) {
logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
<-s.ReadyNotify()
if sctx.lg == nil {
plog.Info("ready to serve client requests")
}
m := cmux.New(sctx.l)
v3c := v3client.New(s)
servElection := v3election.NewElectionServer(v3c)
servLock := v3lock.NewLockServer(v3c)
var gs *grpc.Server
defer func() {
if err != nil && gs != nil {
gs.Stop()
}
}()
if sctx.insecure {
gs = v3rpc.Server(s, nil, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
grpcl := m.Match(cmux.HTTP2())
go func() { errHandler(gs.Serve(grpcl)) }()
var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
if err != nil {
return err
}
}
httpmux := sctx.createMux(gwmux, handler)
srvhttp := &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
ErrorLog: logger, // do not log user error
}
httpl := m.Match(cmux.HTTP1())
go func() { errHandler(srvhttp.Serve(httpl)) }()
sctx.serversC <- &servers{grpc: gs, http: srvhttp}
if sctx.lg != nil {
sctx.lg.Info(
"serving client traffic insecurely; this is strongly discouraged!",
zap.String("address", sctx.l.Addr().String()),
)
} else {
plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
}
}
if sctx.secure {
tlscfg, tlsErr := tlsinfo.ServerConfig()
if tlsErr != nil {
return tlsErr
}
gs = v3rpc.Server(s, tlscfg, gopts...)
v3electionpb.RegisterElectionServer(gs, servElection)
v3lockpb.RegisterLockServer(gs, servLock)
if sctx.serviceRegister != nil {
sctx.serviceRegister(gs)
}
handler = grpcHandlerFunc(gs, handler)
var gwmux *gw.ServeMux
if s.Cfg.EnableGRPCGateway {
dtls := tlscfg.Clone()
// trust local server
dtls.InsecureSkipVerify = true
bundle := credentials.NewBundle(credentials.Config{TLSConfig: dtls})
opts := []grpc.DialOption{grpc.WithTransportCredentials(bundle.TransportCredentials())}
gwmux, err = sctx.registerGateway(opts)
if err != nil {
return err
}
}
var tlsl net.Listener
tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
if err != nil {
return err
}
// TODO: add debug flag; enable logging when debug flag is set
httpmux := sctx.createMux(gwmux, handler)
srv := &http.Server{
Handler: createAccessController(sctx.lg, s, httpmux),
TLSConfig: tlscfg,
ErrorLog: logger, // do not log user error
}
go func() { errHandler(srv.Serve(tlsl)) }()
sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
if sctx.lg != nil {
sctx.lg.Info(
"serving client traffic securely",
zap.String("address", sctx.l.Addr().String()),
)
} else {
plog.Infof("serving client requests on %s", sctx.l.Addr().String())
}
}
close(sctx.serversC)
return m.Serve()
}
// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
// connections or otherHandler otherwise. Given in gRPC docs.
func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
if otherHandler == nil {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
grpcServer.ServeHTTP(w, r)
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
grpcServer.ServeHTTP(w, r)
} else {
otherHandler.ServeHTTP(w, r)
}
})
}
type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
ctx := sctx.ctx
addr := sctx.addr
if network := sctx.network; network == "unix" {
// explicitly define unix network for gRPC socket support
addr = fmt.Sprintf("%s://%s", network, addr)
}
conn, err := grpc.DialContext(ctx, addr, opts...)
if err != nil {
return nil, err
}
gwmux := gw.NewServeMux()
handlers := []registerHandlerFunc{
etcdservergw.RegisterKVHandler,
etcdservergw.RegisterWatchHandler,
etcdservergw.RegisterLeaseHandler,
etcdservergw.RegisterClusterHandler,
etcdservergw.RegisterMaintenanceHandler,
etcdservergw.RegisterAuthHandler,
v3lockgw.RegisterLockHandler,
v3electiongw.RegisterElectionHandler,
}
for _, h := range handlers {
if err := h(ctx, gwmux, conn); err != nil {
return nil, err
}
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
if sctx.lg != nil {
sctx.lg.Warn(
"failed to close connection",
zap.String("address", sctx.l.Addr().String()),
zap.Error(cerr),
)
} else {
plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
}
}
}()
return gwmux, nil
}
func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
httpmux := http.NewServeMux()
for path, h := range sctx.userHandlers {
httpmux.Handle(path, h)
}
if gwmux != nil {
httpmux.Handle(
"/v3/",
wsproxy.WebsocketProxy(
gwmux,
wsproxy.WithRequestMutator(
// Default to the POST method for streams
func(_ *http.Request, outgoing *http.Request) *http.Request {
outgoing.Method = "POST"
return outgoing
},
),
),
)
}
if handler != nil {
httpmux.Handle("/", handler)
}
return httpmux
}
// createAccessController wraps HTTP multiplexer:
// - mutate gRPC gateway request paths
// - check hostname whitelist
// client HTTP requests go here first
func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
return &accessController{lg: lg, s: s, mux: mux}
}
type accessController struct {
lg *zap.Logger
s *etcdserver.EtcdServer
mux *http.ServeMux
}
func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
// redirect for backward compatibilities
if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") {
req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1)
}
if req.TLS == nil { // check origin if client connection is not secure
host := httputil.GetHostname(req)
if !ac.s.AccessController.IsHostWhitelisted(host) {
if ac.lg != nil {
ac.lg.Warn(
"rejecting HTTP request to prevent DNS rebinding attacks",
zap.String("host", host),
)
} else {
plog.Warningf("rejecting HTTP request from %q to prevent DNS rebinding attacks", host)
}
// TODO: use Go's "http.StatusMisdirectedRequest" (421)
// https://github.com/golang/go/commit/4b8a7eafef039af1834ef9bfa879257c4a72b7b5
http.Error(rw, errCVE20185702(host), 421)
return
}
} else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway &&
ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") {
for _, chains := range req.TLS.VerifiedChains {
if len(chains) < 1 {
continue
}
if len(chains[0].Subject.CommonName) != 0 {
http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", 400)
return
}
}
}
// Write CORS header.
if ac.s.AccessController.OriginAllowed("*") {
addCORSHeader(rw, "*")
} else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) {
addCORSHeader(rw, origin)
}
if req.Method == "OPTIONS" {
rw.WriteHeader(http.StatusOK)
return
}
ac.mux.ServeHTTP(rw, req)
}
// addCORSHeader adds the correct cors headers given an origin
func addCORSHeader(w http.ResponseWriter, origin string) {
w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
w.Header().Add("Access-Control-Allow-Origin", origin)
w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
}
// https://github.com/transmission/transmission/pull/468
func errCVE20185702(host string) string {
return fmt.Sprintf(`
etcd received your request, but the Host header was unrecognized.
To fix this, choose one of the following options:
- Enable TLS, then any HTTPS request will be allowed.
- Add the hostname you want to use to the whitelist in settings.
- e.g. etcd --host-whitelist %q
This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702).
`, host)
}
// WrapCORS wraps existing handler with CORS.
// TODO: deprecate this after the v2 proxy is deprecated
func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler {
return &corsHandler{
ac: &etcdserver.AccessController{CORS: cors},
h: h,
}
}
type corsHandler struct {
ac *etcdserver.AccessController
h http.Handler
}
func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if ch.ac.OriginAllowed("*") {
addCORSHeader(rw, "*")
} else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) {
addCORSHeader(rw, origin)
}
if req.Method == "OPTIONS" {
rw.WriteHeader(http.StatusOK)
return
}
ch.h.ServeHTTP(rw, req)
}
func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
if sctx.userHandlers[s] != nil {
if sctx.lg != nil {
sctx.lg.Warn("path is already registered by user handler", zap.String("path", s))
} else {
plog.Warningf("path %s already registered by user handler", s)
}
return
}
sctx.userHandlers[s] = h
}
func (sctx *serveCtx) registerPprof() {
for p, h := range debugutil.PProfHandlers() {
sctx.registerUserHandler(p, h)
}
}
func (sctx *serveCtx) registerTrace() {
reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
}
| vendor/go.etcd.io/etcd/embed/serve.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0006616838509216905,
0.00018137795268557966,
0.00016283232253044844,
0.00016860502364579588,
0.00007373114203801379
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 205
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/config/options"
cmconfig "k8s.io/controller-manager/config"
migration "k8s.io/controller-manager/pkg/leadermigration/options"
)
// GenericControllerManagerConfigurationOptions holds the options which are generic.
type GenericControllerManagerConfigurationOptions struct {
*cmconfig.GenericControllerManagerConfiguration
Debugging *DebuggingOptions
// LeaderMigration holds the options for leader migration; a nil value indicates that default options should be applied.
LeaderMigration *migration.LeaderMigrationOptions
}
// NewGenericControllerManagerConfigurationOptions returns generic configuration default values for both
// the kube-controller-manager and the cloud-controller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewGenericControllerManagerConfigurationOptions(cfg *cmconfig.GenericControllerManagerConfiguration) *GenericControllerManagerConfigurationOptions {
o := &GenericControllerManagerConfigurationOptions{
GenericControllerManagerConfiguration: cfg,
Debugging: RecommendedDebuggingOptions(),
LeaderMigration: nil,
}
return o
}
// AddFlags adds flags related to generic for controller manager to the specified FlagSet.
func (o *GenericControllerManagerConfigurationOptions) AddFlags(fss *cliflag.NamedFlagSets, allControllers, disabledByDefaultControllers []string) {
if o == nil {
return
}
o.Debugging.AddFlags(fss.FlagSet("debugging"))
o.LeaderMigration.AddFlags(fss.FlagSet("leader-migration"))
genericfs := fss.FlagSet("generic")
genericfs.DurationVar(&o.MinResyncPeriod.Duration, "min-resync-period", o.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
genericfs.StringVar(&o.ClientConnection.ContentType, "kube-api-content-type", o.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
genericfs.Float32Var(&o.ClientConnection.QPS, "kube-api-qps", o.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver.")
genericfs.Int32Var(&o.ClientConnection.Burst, "kube-api-burst", o.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver.")
genericfs.DurationVar(&o.ControllerStartInterval.Duration, "controller-start-interval", o.ControllerStartInterval.Duration, "Interval between starting controller managers.")
genericfs.StringSliceVar(&o.Controllers, "controllers", o.Controllers, fmt.Sprintf(""+
"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
options.BindLeaderElectionFlags(&o.LeaderElection, genericfs)
}
// ApplyTo fills up generic config with options.
func (o *GenericControllerManagerConfigurationOptions) ApplyTo(cfg *cmconfig.GenericControllerManagerConfiguration) error {
if o == nil {
return nil
}
if err := o.Debugging.ApplyTo(&cfg.Debugging); err != nil {
return err
}
if err := o.LeaderMigration.ApplyTo(cfg); err != nil {
return err
}
cfg.Port = o.Port
cfg.Address = o.Address
cfg.MinResyncPeriod = o.MinResyncPeriod
cfg.ClientConnection = o.ClientConnection
cfg.ControllerStartInterval = o.ControllerStartInterval
cfg.LeaderElection = o.LeaderElection
cfg.Controllers = o.Controllers
return nil
}
// Validate checks validation of GenericOptions.
func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers []string, disabledByDefaultControllers []string) []error {
if o == nil {
return nil
}
errs := []error{}
errs = append(errs, o.Debugging.Validate()...)
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.Controllers {
if controller == "*" {
continue
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
return errs
}
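// Usage sketch (illustrative, not part of the original file): a controller-manager command is
// expected to wire these options roughly as
//	opts := NewGenericControllerManagerConfigurationOptions(cfg) // cfg: *cmconfig.GenericControllerManagerConfiguration with component defaults
//	opts.AddFlags(&namedFlagSets, allControllers, disabledByDefaultControllers)
//	errs := opts.Validate(allControllers, disabledByDefaultControllers)
//	err := opts.ApplyTo(cfg)
// with Validate run after flag parsing and ApplyTo writing the parsed values back into cfg.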
| staging/src/k8s.io/controller-manager/options/generic.go | 1 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00193254672922194,
0.0005075673689134419,
0.00016374517872463912,
0.00016984101966954768,
0.000631161849014461
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 205
} | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "autoscaling"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta2"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&HorizontalPodAutoscaler{},
&HorizontalPodAutoscalerList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
| staging/src/k8s.io/api/autoscaling/v2beta2/register.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00022353397798724473,
0.00017999338160734624,
0.0001609759492566809,
0.00017411564476788044,
0.00002010680691455491
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 205
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1beta1
import (
"context"
time "time"
storagev1beta1 "k8s.io/api/storage/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
kubernetes "k8s.io/client-go/kubernetes"
v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
cache "k8s.io/client-go/tools/cache"
)
// CSIDriverInformer provides access to a shared informer and lister for
// CSIDrivers.
type CSIDriverInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1beta1.CSIDriverLister
}
type cSIDriverInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewCSIDriverInformer constructs a new informer for CSIDriver type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredCSIDriverInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredCSIDriverInformer constructs a new informer for CSIDriver type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredCSIDriverInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.StorageV1beta1().CSIDrivers().List(context.TODO(), options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.StorageV1beta1().CSIDrivers().Watch(context.TODO(), options)
},
},
&storagev1beta1.CSIDriver{},
resyncPeriod,
indexers,
)
}
func (f *cSIDriverInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredCSIDriverInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *cSIDriverInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&storagev1beta1.CSIDriver{}, f.defaultInformer)
}
func (f *cSIDriverInformer) Lister() v1beta1.CSIDriverLister {
return v1beta1.NewCSIDriverLister(f.Informer().GetIndexer())
}
| staging/src/k8s.io/client-go/informers/storage/v1beta1/csidriver.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.000946564192418009,
0.00025691164773888886,
0.0001646606542635709,
0.00017136649694293737,
0.0002438597148284316
] |
{
"id": 4,
"code_window": [
"\t\t\t\tDebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{\n",
"\t\t\t\t\tEnableProfiling: false,\n",
"\t\t\t\t\tEnableContentionProfiling: true,\n",
"\t\t\t\t},\n",
"\t\t\t},\n",
"\t\t},\n",
"\t\tKubeCloudShared: &KubeCloudSharedOptions{\n",
"\t\t\tKubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{\n",
"\t\t\t\tRouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/cloud-provider/options/options_test.go",
"type": "add",
"edit_start_line_idx": 205
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package daemon
import (
"testing"
"time"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/intstr"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/daemon/util"
"k8s.io/kubernetes/pkg/features"
)
func TestDaemonSetUpdatesPods(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 2
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
// surge is thhe controlling amount
maxSurge := 2
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 0, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 5%maxSurge, maxSurge, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
markPodsReady(podControl.podStore)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 5%maxSurge, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
err = manager.dsStore.Add(ds)
if err != nil {
t.Fatal(err)
}
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
t.Fatal(err)
}
// new pods are not ready, so numUnavailable == maxUnavailable
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
err = manager.dsStore.Add(ds)
if err != nil {
t.Fatal(err)
}
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
t.Fatal(err)
}
// all old pods are unavailable so should be removed
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, true)()
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
maxSurge := 3
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
manager.dsStore.Update(ds)
// all old pods are unavailable so should be surged
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(100, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
// waiting for pods to go ready, old pods are deleted
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(200, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)
setPodReadiness(t, manager, true, 5, func(_ *v1.Pod) bool { return true })
ds.Spec.MinReadySeconds = 15
ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
manager.dsStore.Update(ds)
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(300, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
hash, err := currentDSHash(manager, ds)
if err != nil {
t.Fatal(err)
}
currentPods := podsByNodeMatchingHash(manager, hash)
// mark two updated pods as ready at time 300
setPodReadiness(t, manager, true, 2, func(pod *v1.Pod) bool {
return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
})
// mark one of the old pods that is on a node without an updated pod as unready
setPodReadiness(t, manager, false, 1, func(pod *v1.Pod) bool {
nodeName, err := util.GetTargetNodeName(pod)
if err != nil {
t.Fatal(err)
}
return pod.Labels[apps.ControllerRevisionHashLabelKey] != hash && len(currentPods[nodeName]) == 0
})
// the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
// the deleted old pod
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(310, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
// the new pods are now considered available, so delete the old pods
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(320, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)
// mark all updated pods as ready at time 320
currentPods = podsByNodeMatchingHash(manager, hash)
setPodReadiness(t, manager, true, 3, func(pod *v1.Pod) bool {
return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
})
// the new pods are now considered available, so delete the old pods
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(340, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)
// controller has completed upgrade
manager.failedPodsBackoff.Clock = clock.NewFakeClock(time.Unix(350, 0))
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}
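// podsByNodeMatchingHash returns, keyed by node name, the names of the pods in the pod store whose controller-revision-hash label matches the given hash.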
func podsByNodeMatchingHash(dsc *daemonSetsController, hash string) map[string][]string {
byNode := make(map[string][]string)
for _, obj := range dsc.podStore.List() {
pod := obj.(*v1.Pod)
if pod.Labels[apps.ControllerRevisionHashLabelKey] != hash {
continue
}
nodeName, err := util.GetTargetNodeName(pod)
if err != nil {
panic(err)
}
byNode[nodeName] = append(byNode[nodeName], pod.Name)
}
return byNode
}
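// setPodReadiness marks up to count pods from the pod store that match fn as ready (or not ready), backdating the transition time to the controller's fake clock, and fails the test if fewer than count pods could be updated.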
func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count int, fn func(*v1.Pod) bool) {
t.Helper()
for _, obj := range dsc.podStore.List() {
if count <= 0 {
break
}
pod := obj.(*v1.Pod)
if pod.DeletionTimestamp != nil {
continue
}
if podutil.IsPodReady(pod) == ready {
continue
}
if !fn(pod) {
continue
}
condition := v1.PodCondition{Type: v1.PodReady}
if ready {
condition.Status = v1.ConditionTrue
} else {
condition.Status = v1.ConditionFalse
}
if !podutil.UpdatePodCondition(&pod.Status, &condition) {
t.Fatal("failed to update pod")
}
// TODO: workaround UpdatePodCondition calling time.Now() directly
setCondition := podutil.GetPodReadyCondition(pod.Status)
setCondition.LastTransitionTime.Time = dsc.failedPodsBackoff.Clock.Now()
klog.Infof("marked pod %s ready=%t", pod.Name, ready)
count--
}
if count > 0 {
t.Fatalf("could not mark %d pods ready=%t", count, ready)
}
}
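// currentDSHash returns the hash label of the DaemonSet's current ControllerRevision.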
func currentDSHash(dsc *daemonSetsController, ds *apps.DaemonSet) (string, error) {
// Construct histories of the DaemonSet, and get the hash of current history
cur, _, err := dsc.constructHistory(ds)
if err != nil {
return "", err
}
return cur.Labels[apps.DefaultDaemonSetUniqueLabelKey], nil
}
func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
ds := newDaemonSet("foo")
manager, podControl, _, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
maxUnavailable := 3
addNodes(manager.nodeStore, 0, 5, nil)
manager.dsStore.Add(ds)
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
intStr := intstr.FromInt(maxUnavailable)
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
	// template is not changed, so no pod should be removed
clearExpectations(t, manager, ds, podControl)
expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
clearExpectations(t, manager, ds, podControl)
}
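// newUpdateSurge returns a rolling-update strategy with MaxUnavailable fixed at zero and MaxSurge set to the given value.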
func newUpdateSurge(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
zero := intstr.FromInt(0)
return apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &apps.RollingUpdateDaemonSet{
MaxUnavailable: &zero,
MaxSurge: &value,
},
}
}
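// newUpdateUnavailable returns a rolling-update strategy with MaxUnavailable set to the given value.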
func newUpdateUnavailable(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
return apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &apps.RollingUpdateDaemonSet{
MaxUnavailable: &value,
},
}
}
func TestGetUnavailableNumbers(t *testing.T) {
cases := []struct {
name string
Manager *daemonSetsController
ds *apps.DaemonSet
nodeToPods map[string][]*v1.Pod
enableSurge bool
maxSurge int
maxUnavailable int
emptyNodes int
Err error
}{
{
name: "No nodes",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
return ds
}(),
nodeToPods: make(map[string][]*v1.Pod),
maxUnavailable: 0,
emptyNodes: 0,
},
{
name: "Two nodes with ready pods",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(1))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
emptyNodes: 0,
},
{
name: "Two nodes, one node without pods",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
markPodReady(pod0)
mapping["node-0"] = []*v1.Pod{pod0}
return mapping
}(),
maxUnavailable: 1,
emptyNodes: 1,
},
{
name: "Two nodes, one node without pods, surge",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(0))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
markPodReady(pod0)
mapping["node-0"] = []*v1.Pod{pod0}
return mapping
}(),
maxUnavailable: 1,
emptyNodes: 1,
},
{
name: "Two nodes with pods, MaxUnavailable in percents",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 1,
emptyNodes: 0,
},
{
name: "Two nodes with pods, MaxUnavailable in percents, surge",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("50%"))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
enableSurge: true,
maxSurge: 1,
maxUnavailable: 0,
emptyNodes: 0,
},
{
name: "Two nodes with pods, MaxUnavailable is 100%, surge",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 2, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("100%"))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
markPodReady(pod0)
markPodReady(pod1)
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
enableSurge: true,
maxSurge: 2,
maxUnavailable: 0,
emptyNodes: 0,
},
{
name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
Manager: func() *daemonSetsController {
manager, _, _, err := newTestController()
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
addNodes(manager.nodeStore, 0, 3, nil)
return manager
}(),
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
mapping := make(map[string][]*v1.Pod)
pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
now := metav1.Now()
markPodReady(pod0)
markPodReady(pod1)
pod1.DeletionTimestamp = &now
mapping["node-0"] = []*v1.Pod{pod0}
mapping["node-1"] = []*v1.Pod{pod1}
return mapping
}(),
maxUnavailable: 2,
emptyNodes: 1,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DaemonSetUpdateSurge, c.enableSurge)()
c.Manager.dsStore.Add(c.ds)
nodeList, err := c.Manager.nodeLister.List(labels.Everything())
if err != nil {
t.Fatalf("error listing nodes: %v", err)
}
maxSurge, maxUnavailable, err := c.Manager.updatedDesiredNodeCounts(c.ds, nodeList, c.nodeToPods)
if err != nil && c.Err != nil {
if c.Err != err {
t.Fatalf("Expected error: %v but got: %v", c.Err, err)
}
			} else if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if maxSurge != c.maxSurge || maxUnavailable != c.maxUnavailable {
t.Errorf("Wrong values. maxSurge: %d, expected %d, maxUnavailable: %d, expected: %d", maxSurge, c.maxSurge, maxUnavailable, c.maxUnavailable)
}
var emptyNodes int
for _, pods := range c.nodeToPods {
if len(pods) == 0 {
emptyNodes++
}
}
if emptyNodes != c.emptyNodes {
t.Errorf("expected numEmpty to be %d, was %d", c.emptyNodes, emptyNodes)
}
})
}
}
| pkg/controller/daemon/update_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0008245292701758444,
0.0001803808263503015,
0.00015588646056130528,
0.00016982803936116397,
0.00008134645031532273
] |
{
"id": 5,
"code_window": [
"\to := &GenericControllerManagerConfigurationOptions{\n",
"\t\tGenericControllerManagerConfiguration: cfg,\n",
"\t\tDebugging: RecommendedDebuggingOptions(),\n",
"\t\tLeaderMigration: nil,\n",
"\t}\n",
"\n",
"\treturn o\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/controller-manager/options/generic.go",
"type": "replace",
"edit_start_line_idx": 44
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"net"
"reflect"
"sort"
"testing"
"time"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
cpconfig "k8s.io/cloud-provider/config"
serviceconfig "k8s.io/cloud-provider/controllers/service/config"
cpoptions "k8s.io/cloud-provider/options"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/logs"
"k8s.io/component-base/metrics"
cmconfig "k8s.io/controller-manager/config"
cmoptions "k8s.io/controller-manager/options"
kubecontrollerconfig "k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
csrsigningconfig "k8s.io/kubernetes/pkg/controller/certificates/signer/config"
cronjobconfig "k8s.io/kubernetes/pkg/controller/cronjob/config"
daemonconfig "k8s.io/kubernetes/pkg/controller/daemon/config"
deploymentconfig "k8s.io/kubernetes/pkg/controller/deployment/config"
endpointconfig "k8s.io/kubernetes/pkg/controller/endpoint/config"
endpointsliceconfig "k8s.io/kubernetes/pkg/controller/endpointslice/config"
endpointslicemirroringconfig "k8s.io/kubernetes/pkg/controller/endpointslicemirroring/config"
garbagecollectorconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config"
jobconfig "k8s.io/kubernetes/pkg/controller/job/config"
namespaceconfig "k8s.io/kubernetes/pkg/controller/namespace/config"
nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config"
nodelifecycleconfig "k8s.io/kubernetes/pkg/controller/nodelifecycle/config"
poautosclerconfig "k8s.io/kubernetes/pkg/controller/podautoscaler/config"
podgcconfig "k8s.io/kubernetes/pkg/controller/podgc/config"
replicasetconfig "k8s.io/kubernetes/pkg/controller/replicaset/config"
replicationconfig "k8s.io/kubernetes/pkg/controller/replication/config"
resourcequotaconfig "k8s.io/kubernetes/pkg/controller/resourcequota/config"
serviceaccountconfig "k8s.io/kubernetes/pkg/controller/serviceaccount/config"
statefulsetconfig "k8s.io/kubernetes/pkg/controller/statefulset/config"
ttlafterfinishedconfig "k8s.io/kubernetes/pkg/controller/ttlafterfinished/config"
attachdetachconfig "k8s.io/kubernetes/pkg/controller/volume/attachdetach/config"
persistentvolumeconfig "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/config"
)
var args = []string{
"--address=192.168.4.10",
"--allocate-node-cidrs=true",
"--attach-detach-reconcile-sync-period=30s",
"--cidr-allocator-type=CloudAllocator",
"--cloud-config=/cloud-config",
"--cloud-provider=gce",
"--cluster-cidr=1.2.3.4/24",
"--cluster-name=k8s",
"--cluster-signing-cert-file=/cluster-signing-cert",
"--cluster-signing-key-file=/cluster-signing-key",
"--cluster-signing-kubelet-serving-cert-file=/cluster-signing-kubelet-serving/cert-file",
"--cluster-signing-kubelet-serving-key-file=/cluster-signing-kubelet-serving/key-file",
"--cluster-signing-kubelet-client-cert-file=/cluster-signing-kubelet-client/cert-file",
"--cluster-signing-kubelet-client-key-file=/cluster-signing-kubelet-client/key-file",
"--cluster-signing-kube-apiserver-client-cert-file=/cluster-signing-kube-apiserver-client/cert-file",
"--cluster-signing-kube-apiserver-client-key-file=/cluster-signing-kube-apiserver-client/key-file",
"--cluster-signing-legacy-unknown-cert-file=/cluster-signing-legacy-unknown/cert-file",
"--cluster-signing-legacy-unknown-key-file=/cluster-signing-legacy-unknown/key-file",
"--concurrent-deployment-syncs=10",
"--concurrent-statefulset-syncs=15",
"--concurrent-endpoint-syncs=10",
"--concurrent-service-endpoint-syncs=10",
"--concurrent-gc-syncs=30",
"--concurrent-namespace-syncs=20",
"--concurrent-replicaset-syncs=10",
"--concurrent-resource-quota-syncs=10",
"--concurrent-service-syncs=2",
"--concurrent-serviceaccount-token-syncs=10",
"--concurrent_rc_syncs=10",
"--configure-cloud-routes=false",
"--contention-profiling=true",
"--controller-start-interval=2m",
"--controllers=foo,bar",
"--deployment-controller-sync-period=45s",
"--disable-attach-detach-reconcile-sync=true",
"--enable-dynamic-provisioning=false",
"--enable-garbage-collector=false",
"--enable-hostpath-provisioner=true",
"--enable-taint-manager=false",
"--cluster-signing-duration=10h",
"--flex-volume-plugin-dir=/flex-volume-plugin",
"--volume-host-cidr-denylist=127.0.0.1/28,feed::/16",
"--volume-host-allow-local-loopback=false",
"--horizontal-pod-autoscaler-downscale-delay=2m",
"--horizontal-pod-autoscaler-sync-period=45s",
"--horizontal-pod-autoscaler-upscale-delay=1m",
"--horizontal-pod-autoscaler-downscale-stabilization=3m",
"--horizontal-pod-autoscaler-cpu-initialization-period=90s",
"--horizontal-pod-autoscaler-initial-readiness-delay=50s",
"--http2-max-streams-per-connection=47",
"--kube-api-burst=100",
"--kube-api-content-type=application/json",
"--kube-api-qps=50.0",
"--kubeconfig=/kubeconfig",
"--large-cluster-size-threshold=100",
"--leader-elect=false",
"--leader-elect-lease-duration=30s",
"--leader-elect-renew-deadline=15s",
"--leader-elect-resource-lock=configmap",
"--leader-elect-retry-period=5s",
"--master=192.168.4.20",
"--max-endpoints-per-slice=200",
"--min-resync-period=8h",
"--mirroring-concurrent-service-endpoint-syncs=2",
"--mirroring-max-endpoints-per-subset=1000",
"--namespace-sync-period=10m",
"--node-cidr-mask-size=48",
"--node-cidr-mask-size-ipv4=48",
"--node-cidr-mask-size-ipv6=108",
"--node-eviction-rate=0.2",
"--node-monitor-grace-period=30s",
"--node-monitor-period=10s",
"--node-startup-grace-period=30s",
"--pod-eviction-timeout=2m",
"--port=10000",
"--profiling=false",
"--pv-recycler-increment-timeout-nfs=45",
"--pv-recycler-minimum-timeout-hostpath=45",
"--pv-recycler-minimum-timeout-nfs=200",
"--pv-recycler-timeout-increment-hostpath=45",
"--pvclaimbinder-sync-period=30s",
"--resource-quota-sync-period=10m",
"--route-reconciliation-period=30s",
"--secondary-node-eviction-rate=0.05",
"--service-account-private-key-file=/service-account-private-key",
"--terminated-pod-gc-threshold=12000",
"--unhealthy-zone-threshold=0.6",
"--use-service-account-credentials=true",
"--cert-dir=/a/b/c",
"--bind-address=192.168.4.21",
"--secure-port=10001",
"--concurrent-ttl-after-finished-syncs=8",
}
func TestAddFlags(t *testing.T) {
fs := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
s, _ := NewKubeControllerManagerOptions()
for _, f := range s.Flags([]string{""}, []string{""}).FlagSets {
fs.AddFlagSet(f)
}
fs.Parse(args)
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(s.GarbageCollectorController.GCIgnoredResources))
expected := &KubeControllerManagerOptions{
Generic: &cmoptions.GenericControllerManagerConfigurationOptions{
GenericControllerManagerConfiguration: &cmconfig.GenericControllerManagerConfiguration{
Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour},
ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
ContentType: "application/json",
QPS: 50.0,
Burst: 100,
},
ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute},
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
ResourceLock: "configmap",
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 30 * time.Second},
RenewDeadline: metav1.Duration{Duration: 15 * time.Second},
RetryPeriod: metav1.Duration{Duration: 5 * time.Second},
ResourceName: "kube-controller-manager",
ResourceNamespace: "kube-system",
},
Controllers: []string{"foo", "bar"},
},
Debugging: &cmoptions.DebuggingOptions{
DebuggingConfiguration: &componentbaseconfig.DebuggingConfiguration{
EnableProfiling: false,
EnableContentionProfiling: true,
},
},
},
KubeCloudShared: &cpoptions.KubeCloudSharedOptions{
KubeCloudSharedConfiguration: &cpconfig.KubeCloudSharedConfiguration{
UseServiceAccountCredentials: true,
RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},
NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterName: "k8s",
ClusterCIDR: "1.2.3.4/24",
AllocateNodeCIDRs: true,
CIDRAllocatorType: "CloudAllocator",
ConfigureCloudRoutes: false,
},
CloudProvider: &cpoptions.CloudProviderOptions{
CloudProviderConfiguration: &cpconfig.CloudProviderConfiguration{
Name: "gce",
CloudConfigFile: "/cloud-config",
},
},
},
ServiceController: &cpoptions.ServiceControllerOptions{
ServiceControllerConfiguration: &serviceconfig.ServiceControllerConfiguration{
ConcurrentServiceSyncs: 2,
},
},
AttachDetachController: &AttachDetachControllerOptions{
&attachdetachconfig.AttachDetachControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second},
DisableAttachDetachReconcilerSync: true,
},
},
CSRSigningController: &CSRSigningControllerOptions{
&csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
},
},
DaemonSetController: &DaemonSetControllerOptions{
&daemonconfig.DaemonSetControllerConfiguration{
ConcurrentDaemonSetSyncs: 2,
},
},
DeploymentController: &DeploymentControllerOptions{
&deploymentconfig.DeploymentControllerConfiguration{
ConcurrentDeploymentSyncs: 10,
DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
},
},
StatefulSetController: &StatefulSetControllerOptions{
&statefulsetconfig.StatefulSetControllerConfiguration{
ConcurrentStatefulSetSyncs: 15,
},
},
DeprecatedFlags: &DeprecatedControllerOptions{
&kubectrlmgrconfig.DeprecatedControllerConfiguration{
DeletingPodsQPS: 0.1,
RegisterRetryCount: 10,
},
},
EndpointController: &EndpointControllerOptions{
&endpointconfig.EndpointControllerConfiguration{
ConcurrentEndpointSyncs: 10,
},
},
EndpointSliceController: &EndpointSliceControllerOptions{
&endpointsliceconfig.EndpointSliceControllerConfiguration{
ConcurrentServiceEndpointSyncs: 10,
MaxEndpointsPerSlice: 200,
},
},
EndpointSliceMirroringController: &EndpointSliceMirroringControllerOptions{
&endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration{
MirroringConcurrentServiceEndpointSyncs: 2,
MirroringMaxEndpointsPerSubset: 1000,
},
},
GarbageCollectorController: &GarbageCollectorControllerOptions{
&garbagecollectorconfig.GarbageCollectorControllerConfiguration{
ConcurrentGCSyncs: 30,
GCIgnoredResources: []garbagecollectorconfig.GroupResource{
{Group: "", Resource: "events"},
},
EnableGarbageCollector: false,
},
},
HPAController: &HPAControllerOptions{
&poautosclerconfig.HPAControllerConfiguration{
HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute},
HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute},
HorizontalPodAutoscalerDownscaleStabilizationWindow: metav1.Duration{Duration: 3 * time.Minute},
HorizontalPodAutoscalerCPUInitializationPeriod: metav1.Duration{Duration: 90 * time.Second},
HorizontalPodAutoscalerInitialReadinessDelay: metav1.Duration{Duration: 50 * time.Second},
HorizontalPodAutoscalerTolerance: 0.1,
HorizontalPodAutoscalerUseRESTClients: true,
},
},
JobController: &JobControllerOptions{
&jobconfig.JobControllerConfiguration{
ConcurrentJobSyncs: 5,
},
},
CronJobController: &CronJobControllerOptions{
&cronjobconfig.CronJobControllerConfiguration{
ConcurrentCronJobSyncs: 5,
},
},
NamespaceController: &NamespaceControllerOptions{
&namespaceconfig.NamespaceControllerConfiguration{
NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentNamespaceSyncs: 20,
},
},
NodeIPAMController: &NodeIPAMControllerOptions{
&nodeipamconfig.NodeIPAMControllerConfiguration{
NodeCIDRMaskSize: 48,
NodeCIDRMaskSizeIPv4: 48,
NodeCIDRMaskSizeIPv6: 108,
},
},
NodeLifecycleController: &NodeLifecycleControllerOptions{
&nodelifecycleconfig.NodeLifecycleControllerConfiguration{
EnableTaintManager: false,
NodeEvictionRate: 0.2,
SecondaryNodeEvictionRate: 0.05,
NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},
NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second},
PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute},
LargeClusterSizeThreshold: 100,
UnhealthyZoneThreshold: 0.6,
},
},
PersistentVolumeBinderController: &PersistentVolumeBinderControllerOptions{
&persistentvolumeconfig.PersistentVolumeBinderControllerConfiguration{
PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
VolumeConfiguration: persistentvolumeconfig.VolumeConfiguration{
EnableDynamicProvisioning: false,
EnableHostPathProvisioning: true,
FlexVolumePluginDir: "/flex-volume-plugin",
PersistentVolumeRecyclerConfiguration: persistentvolumeconfig.PersistentVolumeRecyclerConfiguration{
MaximumRetry: 3,
MinimumTimeoutNFS: 200,
IncrementTimeoutNFS: 45,
MinimumTimeoutHostPath: 45,
IncrementTimeoutHostPath: 45,
},
},
VolumeHostCIDRDenylist: []string{"127.0.0.1/28", "feed::/16"},
VolumeHostAllowLocalLoopback: false,
},
},
PodGCController: &PodGCControllerOptions{
&podgcconfig.PodGCControllerConfiguration{
TerminatedPodGCThreshold: 12000,
},
},
ReplicaSetController: &ReplicaSetControllerOptions{
&replicasetconfig.ReplicaSetControllerConfiguration{
ConcurrentRSSyncs: 10,
},
},
ReplicationController: &ReplicationControllerOptions{
&replicationconfig.ReplicationControllerConfiguration{
ConcurrentRCSyncs: 10,
},
},
ResourceQuotaController: &ResourceQuotaControllerOptions{
&resourcequotaconfig.ResourceQuotaControllerConfiguration{
ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentResourceQuotaSyncs: 10,
},
},
SAController: &SAControllerOptions{
&serviceaccountconfig.SAControllerConfiguration{
ServiceAccountKeyFile: "/service-account-private-key",
ConcurrentSATokenSyncs: 10,
},
},
TTLAfterFinishedController: &TTLAfterFinishedControllerOptions{
&ttlafterfinishedconfig.TTLAfterFinishedControllerConfiguration{
ConcurrentTTLSyncs: 8,
},
},
SecureServing: (&apiserveroptions.SecureServingOptions{
BindPort: 10001,
BindAddress: net.ParseIP("192.168.4.21"),
ServerCert: apiserveroptions.GeneratableKeyCert{
CertDirectory: "/a/b/c",
PairName: "kube-controller-manager",
},
HTTP2MaxStreamsPerConnection: 47,
}).WithLoopback(),
InsecureServing: (&apiserveroptions.DeprecatedInsecureServingOptions{
BindAddress: net.ParseIP("192.168.4.10"),
BindPort: int(10000),
BindNetwork: "tcp",
}).WithLoopback(),
Authentication: &apiserveroptions.DelegatingAuthenticationOptions{
CacheTTL: 10 * time.Second,
ClientTimeout: 10 * time.Second,
WebhookRetryBackoff: apiserveroptions.DefaultAuthWebhookRetryBackoff(),
ClientCert: apiserveroptions.ClientCertAuthenticationOptions{},
RequestHeader: apiserveroptions.RequestHeaderAuthenticationOptions{
UsernameHeaders: []string{"x-remote-user"},
GroupHeaders: []string{"x-remote-group"},
ExtraHeaderPrefixes: []string{"x-remote-extra-"},
},
RemoteKubeConfigFileOptional: true,
},
Authorization: &apiserveroptions.DelegatingAuthorizationOptions{
AllowCacheTTL: 10 * time.Second,
DenyCacheTTL: 10 * time.Second,
ClientTimeout: 10 * time.Second,
WebhookRetryBackoff: apiserveroptions.DefaultAuthWebhookRetryBackoff(),
RemoteKubeConfigFileOptional: true,
AlwaysAllowPaths: []string{"/healthz", "/readyz", "/livez"}, // note: this does not match /healthz/ or /healthz/*
AlwaysAllowGroups: []string{"system:masters"},
},
Kubeconfig: "/kubeconfig",
Master: "192.168.4.20",
Metrics: &metrics.Options{},
Logs: logs.NewOptions(),
}
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(expected.GarbageCollectorController.GCIgnoredResources))
if !reflect.DeepEqual(expected, s) {
t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected, s))
}
}
func TestApplyTo(t *testing.T) {
fs := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
s, _ := NewKubeControllerManagerOptions()
// flag set to parse the args that are required to start the kube controller manager
for _, f := range s.Flags([]string{""}, []string{""}).FlagSets {
fs.AddFlagSet(f)
}
fs.Parse(args)
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(s.GarbageCollectorController.GCIgnoredResources))
expected := &kubecontrollerconfig.Config{
ComponentConfig: kubectrlmgrconfig.KubeControllerManagerConfiguration{
Generic: cmconfig.GenericControllerManagerConfiguration{
Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
MinResyncPeriod: metav1.Duration{Duration: 8 * time.Hour},
ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
ContentType: "application/json",
QPS: 50.0,
Burst: 100,
},
ControllerStartInterval: metav1.Duration{Duration: 2 * time.Minute},
LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
ResourceLock: "configmap",
LeaderElect: false,
LeaseDuration: metav1.Duration{Duration: 30 * time.Second},
RenewDeadline: metav1.Duration{Duration: 15 * time.Second},
RetryPeriod: metav1.Duration{Duration: 5 * time.Second},
ResourceName: "kube-controller-manager",
ResourceNamespace: "kube-system",
},
Controllers: []string{"foo", "bar"},
Debugging: componentbaseconfig.DebuggingConfiguration{
EnableProfiling: false,
EnableContentionProfiling: true,
},
},
KubeCloudShared: cpconfig.KubeCloudSharedConfiguration{
UseServiceAccountCredentials: true,
RouteReconciliationPeriod: metav1.Duration{Duration: 30 * time.Second},
NodeMonitorPeriod: metav1.Duration{Duration: 10 * time.Second},
ClusterName: "k8s",
ClusterCIDR: "1.2.3.4/24",
AllocateNodeCIDRs: true,
CIDRAllocatorType: "CloudAllocator",
ConfigureCloudRoutes: false,
CloudProvider: cpconfig.CloudProviderConfiguration{
Name: "gce",
CloudConfigFile: "/cloud-config",
},
},
ServiceController: serviceconfig.ServiceControllerConfiguration{
ConcurrentServiceSyncs: 2,
},
AttachDetachController: attachdetachconfig.AttachDetachControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 30 * time.Second},
DisableAttachDetachReconcilerSync: true,
},
CSRSigningController: csrsigningconfig.CSRSigningControllerConfiguration{
ClusterSigningCertFile: "/cluster-signing-cert",
ClusterSigningKeyFile: "/cluster-signing-key",
ClusterSigningDuration: metav1.Duration{Duration: 10 * time.Hour},
KubeletServingSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-serving/cert-file",
KeyFile: "/cluster-signing-kubelet-serving/key-file",
},
KubeletClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kubelet-client/cert-file",
KeyFile: "/cluster-signing-kubelet-client/key-file",
},
KubeAPIServerClientSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-kube-apiserver-client/cert-file",
KeyFile: "/cluster-signing-kube-apiserver-client/key-file",
},
LegacyUnknownSignerConfiguration: csrsigningconfig.CSRSigningConfiguration{
CertFile: "/cluster-signing-legacy-unknown/cert-file",
KeyFile: "/cluster-signing-legacy-unknown/key-file",
},
},
DaemonSetController: daemonconfig.DaemonSetControllerConfiguration{
ConcurrentDaemonSetSyncs: 2,
},
DeploymentController: deploymentconfig.DeploymentControllerConfiguration{
ConcurrentDeploymentSyncs: 10,
DeploymentControllerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
},
StatefulSetController: statefulsetconfig.StatefulSetControllerConfiguration{
ConcurrentStatefulSetSyncs: 15,
},
DeprecatedController: kubectrlmgrconfig.DeprecatedControllerConfiguration{
DeletingPodsQPS: 0.1,
RegisterRetryCount: 10,
},
EndpointController: endpointconfig.EndpointControllerConfiguration{
ConcurrentEndpointSyncs: 10,
},
EndpointSliceController: endpointsliceconfig.EndpointSliceControllerConfiguration{
ConcurrentServiceEndpointSyncs: 10,
MaxEndpointsPerSlice: 200,
},
EndpointSliceMirroringController: endpointslicemirroringconfig.EndpointSliceMirroringControllerConfiguration{
MirroringConcurrentServiceEndpointSyncs: 2,
MirroringMaxEndpointsPerSubset: 1000,
},
GarbageCollectorController: garbagecollectorconfig.GarbageCollectorControllerConfiguration{
ConcurrentGCSyncs: 30,
GCIgnoredResources: []garbagecollectorconfig.GroupResource{
{Group: "", Resource: "events"},
},
EnableGarbageCollector: false,
},
HPAController: poautosclerconfig.HPAControllerConfiguration{
HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 45 * time.Second},
HorizontalPodAutoscalerUpscaleForbiddenWindow: metav1.Duration{Duration: 1 * time.Minute},
HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 2 * time.Minute},
HorizontalPodAutoscalerDownscaleStabilizationWindow: metav1.Duration{Duration: 3 * time.Minute},
HorizontalPodAutoscalerCPUInitializationPeriod: metav1.Duration{Duration: 90 * time.Second},
HorizontalPodAutoscalerInitialReadinessDelay: metav1.Duration{Duration: 50 * time.Second},
HorizontalPodAutoscalerTolerance: 0.1,
HorizontalPodAutoscalerUseRESTClients: true,
},
JobController: jobconfig.JobControllerConfiguration{
ConcurrentJobSyncs: 5,
},
CronJobController: cronjobconfig.CronJobControllerConfiguration{
ConcurrentCronJobSyncs: 5,
},
NamespaceController: namespaceconfig.NamespaceControllerConfiguration{
NamespaceSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentNamespaceSyncs: 20,
},
NodeIPAMController: nodeipamconfig.NodeIPAMControllerConfiguration{
NodeCIDRMaskSize: 48,
NodeCIDRMaskSizeIPv4: 48,
NodeCIDRMaskSizeIPv6: 108,
},
NodeLifecycleController: nodelifecycleconfig.NodeLifecycleControllerConfiguration{
EnableTaintManager: false,
NodeEvictionRate: 0.2,
SecondaryNodeEvictionRate: 0.05,
NodeMonitorGracePeriod: metav1.Duration{Duration: 30 * time.Second},
NodeStartupGracePeriod: metav1.Duration{Duration: 30 * time.Second},
PodEvictionTimeout: metav1.Duration{Duration: 2 * time.Minute},
LargeClusterSizeThreshold: 100,
UnhealthyZoneThreshold: 0.6,
},
PersistentVolumeBinderController: persistentvolumeconfig.PersistentVolumeBinderControllerConfiguration{
PVClaimBinderSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
VolumeConfiguration: persistentvolumeconfig.VolumeConfiguration{
EnableDynamicProvisioning: false,
EnableHostPathProvisioning: true,
FlexVolumePluginDir: "/flex-volume-plugin",
PersistentVolumeRecyclerConfiguration: persistentvolumeconfig.PersistentVolumeRecyclerConfiguration{
MaximumRetry: 3,
MinimumTimeoutNFS: 200,
IncrementTimeoutNFS: 45,
MinimumTimeoutHostPath: 45,
IncrementTimeoutHostPath: 45,
},
},
VolumeHostCIDRDenylist: []string{"127.0.0.1/28", "feed::/16"},
VolumeHostAllowLocalLoopback: false,
},
PodGCController: podgcconfig.PodGCControllerConfiguration{
TerminatedPodGCThreshold: 12000,
},
ReplicaSetController: replicasetconfig.ReplicaSetControllerConfiguration{
ConcurrentRSSyncs: 10,
},
ReplicationController: replicationconfig.ReplicationControllerConfiguration{
ConcurrentRCSyncs: 10,
},
ResourceQuotaController: resourcequotaconfig.ResourceQuotaControllerConfiguration{
ResourceQuotaSyncPeriod: metav1.Duration{Duration: 10 * time.Minute},
ConcurrentResourceQuotaSyncs: 10,
},
SAController: serviceaccountconfig.SAControllerConfiguration{
ServiceAccountKeyFile: "/service-account-private-key",
ConcurrentSATokenSyncs: 10,
},
TTLAfterFinishedController: ttlafterfinishedconfig.TTLAfterFinishedControllerConfiguration{
ConcurrentTTLSyncs: 8,
},
},
}
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(expected.ComponentConfig.GarbageCollectorController.GCIgnoredResources))
c := &kubecontrollerconfig.Config{}
s.ApplyTo(c)
if !reflect.DeepEqual(expected.ComponentConfig, c.ComponentConfig) {
t.Errorf("Got different configuration than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected.ComponentConfig, c.ComponentConfig))
}
}
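// sortedGCIgnoredResources implements sort.Interface for GroupResources, ordering by group and then by resource, so that comparisons do not depend on map iteration order.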
type sortedGCIgnoredResources []garbagecollectorconfig.GroupResource
func (r sortedGCIgnoredResources) Len() int {
return len(r)
}
func (r sortedGCIgnoredResources) Less(i, j int) bool {
if r[i].Group < r[j].Group {
return true
} else if r[i].Group > r[j].Group {
return false
}
return r[i].Resource < r[j].Resource
}
func (r sortedGCIgnoredResources) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
| cmd/kube-controller-manager/app/options/options_test.go | 1 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.006719296332448721,
0.000281510321656242,
0.00016332510858774185,
0.00016868139209691435,
0.0007944538374431431
] |
{
"id": 5,
"code_window": [
"\to := &GenericControllerManagerConfigurationOptions{\n",
"\t\tGenericControllerManagerConfiguration: cfg,\n",
"\t\tDebugging: RecommendedDebuggingOptions(),\n",
"\t\tLeaderMigration: nil,\n",
"\t}\n",
"\n",
"\treturn o\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/controller-manager/options/generic.go",
"type": "replace",
"edit_start_line_idx": 44
} | // +build !providerless
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package routeclient
import (
"context"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
"k8s.io/legacy-cloud-providers/azure/retry"
)
const (
// APIVersion is the API version for network.
APIVersion = "2019-06-01"
)
// Interface is the client interface for Route.
// Don't forget to run the following command to generate the mock client:
// mockgen -source=$GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/routeclient/interface.go -package=mockrouteclient Interface > $GOPATH/src/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/clients/routeclient/mockrouteclient/interface.go
type Interface interface {
// CreateOrUpdate creates or updates a Route.
CreateOrUpdate(ctx context.Context, resourceGroupName string, routeTableName string, routeName string, routeParameters network.Route, etag string) *retry.Error
// Delete deletes a Route by name.
Delete(ctx context.Context, resourceGroupName string, routeTableName string, routeName string) *retry.Error
}
| staging/src/k8s.io/legacy-cloud-providers/azure/clients/routeclient/interface.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.0001786240318324417,
0.0001694418751867488,
0.00016154210607055575,
0.00016760638391133398,
0.0000058252408052794635
] |
{
"id": 5,
"code_window": [
"\to := &GenericControllerManagerConfigurationOptions{\n",
"\t\tGenericControllerManagerConfiguration: cfg,\n",
"\t\tDebugging: RecommendedDebuggingOptions(),\n",
"\t\tLeaderMigration: nil,\n",
"\t}\n",
"\n",
"\treturn o\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/controller-manager/options/generic.go",
"type": "replace",
"edit_start_line_idx": 44
} | {
"kind": "NetworkPolicy",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "2",
"generateName": "3",
"namespace": "4",
"selfLink": "5",
"uid": "7",
"resourceVersion": "11042405498087606203",
"generation": 8071137005907523419,
"creationTimestamp": null,
"deletionGracePeriodSeconds": -4955867275792137171,
"labels": {
"7": "8"
},
"annotations": {
"9": "10"
},
"ownerReferences": [
{
"apiVersion": "11",
"kind": "12",
"name": "13",
"uid": "Dz廔ȇ{sŊƏp",
"controller": false,
"blockOwnerDeletion": true
}
],
"finalizers": [
"14"
],
"clusterName": "15",
"managedFields": [
{
"manager": "16",
"operation": "鐊唊飙Ş-U圴÷a/ɔ}摁(湗Ć]",
"apiVersion": "17",
"fieldsType": "18"
}
]
},
"spec": {
"podSelector": {
"matchLabels": {
"8---jop9641lg.p-g8c2-k-912e5-c-e63-n-3n/E9.8ThjT9s-j41-0-6p-JFHn7y-74.-0MUORQQ.N2.3": "68._bQw.-dG6c-.6--_x.--0wmZk1_8._3s_-_Bq.m_4"
},
"matchExpressions": [
{
"key": "p503---477-49p---o61---4fy--9---7--9-9s-0-u5lj2--10pq-0-7-9-2-0/fP81.-.9Vdx.TB_M-H_5_.t..bG0",
"operator": "In",
"values": [
"D07.a_.y_y_o0_5qN2_---_M.N_._a6.9bHjdH.-.5_.I8__n"
]
}
]
},
"ingress": [
{
"ports": [
{
"protocol": "Ǐ2啗塧ȱ蓿彭聡A3fƻfʣ",
"port": 2,
"endPort": -420211493
}
],
"from": [
{
"podSelector": {
"matchLabels": {
"5__.h-J-M.9_T.q-o7.y-SQ.9A-F-.4--_vLW.jj-.5B.._.5_3-4": "31-4.xXe..03f_--0..L.0qQ6W-.d.20h-OK-_g"
},
"matchExpressions": [
{
"key": "R6S17_.8CnK_O.d-._NwcGnP-w-Sf5_Or.i1_7z.WH-.._Td2-N_Y.v",
"operator": "Exists"
}
]
},
"namespaceSelector": {
"matchLabels": {
"pl6-2-316/NgO-d.iUaC_wYSJfB._.zS-._..3le-Q4-R-083.S5": "U_D__6t-2.-_-8wE._._3.-.83_iq_-y.-25C.A-j..9dfn3Y8d_0_.-y"
},
"matchExpressions": [
{
"key": "f9wk-3--652xh.2a-ik-ak---r0nh-9289---x-p-qpt6-1w-3205c1lxeqyn-5--9d5a3-7bq/4FpF_W-1._-vL_i.-_-a--G-I.-_Y33--.8U.-.5--_zm-.-_RJt2X",
"operator": "In",
"values": [
"g4"
]
}
]
},
"ipBlock": {
"cidr": "37",
"except": [
"38"
]
}
}
]
}
],
"egress": [
{
"ports": [
{
"protocol": "s3!Zɾģ毋",
"port": 3,
"endPort": -630252364
}
],
"to": [
{
"podSelector": {
"matchLabels": {
"P1s-V.9.3": "9..c_uo3a"
},
"matchExpressions": [
{
"key": "1_o_p665O_4Gj._BXt.O-7___-Y_um-_8r--684._-_18_...E.-2oy",
"operator": "DoesNotExist"
}
]
},
"namespaceSelector": {
"matchLabels": {
"5l-59g-qy5--ar-gn58nc2-3--6-o-h-9-15v-5925a-x12a-214-3sc/M.JP_oA_4A.J2s3.XL6_EU--AH-Q.GM7B": "N-_-vv-Q2qz.W..4....-h._.GgT7_7B_D-..-.k4uz"
},
"matchExpressions": [
{
"key": "7u-tie4-7--gm3.38vl-1z---883d-v3j4-7y-p--u/d-4_4--.-_Z4.LA3HVG93_._.I3.__-.0-z_z0sn8",
"operator": "DoesNotExist"
}
]
},
"ipBlock": {
"cidr": "51",
"except": [
"52"
]
}
}
]
}
],
"policyTypes": [
"(dŊiɢz"
]
}
} | staging/src/k8s.io/api/testdata/HEAD/extensions.v1beta1.NetworkPolicy.json | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017875795310828835,
0.0001730612711980939,
0.0001700482243904844,
0.00017309223767369986,
0.0000021580681277555414
] |
{
"id": 5,
"code_window": [
"\to := &GenericControllerManagerConfigurationOptions{\n",
"\t\tGenericControllerManagerConfiguration: cfg,\n",
"\t\tDebugging: RecommendedDebuggingOptions(),\n",
"\t\tLeaderMigration: nil,\n",
"\t}\n",
"\n",
"\treturn o\n",
"}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tLeaderMigration: &migration.LeaderMigrationOptions{},\n"
],
"file_path": "staging/src/k8s.io/controller-manager/options/generic.go",
"type": "replace",
"edit_start_line_idx": 44
} | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"fmt"
"io/ioutil"
"os"
"k8s.io/apimachinery/pkg/api/resource"
"testing"
)
func TestDiskUsage(t *testing.T) {
dir1, err := ioutil.TempDir("", "dir_1")
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
defer os.RemoveAll(dir1)
	tmpfile1, err := ioutil.TempFile(dir1, "test")
	if err != nil {
		t.Fatalf("TestDiskUsage failed: %s", err.Error())
	}
	if _, err = tmpfile1.WriteString("just for testing"); err != nil {
		t.Fatalf("TestDiskUsage failed: %s", err.Error())
	}
dir2, err := ioutil.TempDir(dir1, "dir_2")
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
	tmpfile2, err := ioutil.TempFile(dir2, "test")
	if err != nil {
		t.Fatalf("TestDiskUsage failed: %s", err.Error())
	}
	if _, err = tmpfile2.WriteString("just for testing"); err != nil {
		t.Fatalf("TestDiskUsage failed: %s", err.Error())
	}
dirInfo1, err := os.Lstat(dir1)
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
dirInfo2, err := os.Lstat(dir2)
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
file1 := dir1 + "/" + "test"
file2 := dir2 + "/" + "test"
fileInfo1, err := os.Lstat(file1)
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
fileInfo2, err := os.Lstat(file2)
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
total := dirInfo1.Size() + dirInfo2.Size() + fileInfo1.Size() + fileInfo2.Size()
size, err := DiskUsage(dir1)
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
used, err := resource.ParseQuantity(fmt.Sprintf("%d", total))
if err != nil {
t.Fatalf("TestDiskUsage failed: %s", err.Error())
}
if size.Cmp(used) != 1 {
		t.Fatalf("TestDiskUsage failed: DiskUsage() = %s, want a value greater than %s", size.String(), used.String())
}
}
| pkg/volume/util/fs/fs_windows_test.go | 0 | https://github.com/kubernetes/kubernetes/commit/68ebe29529b861cfdd075f76bad5643c58ece70f | [
0.00017885562556330115,
0.0001761049497872591,
0.0001717306295176968,
0.00017607398331165314,
0.000002292297494932427
] |
{
"id": 0,
"code_window": [
"\t\treturn syncErr\n",
"\t}\n",
"\n",
"\t// If the network plugin is not ready, only start the pod if it uses the host network\n",
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !podUsesHostNetwork(pod) {\n",
"\t\treturn fmt.Errorf(\"network is not ready: %v\", rs)\n",
"\t}\n",
"\n",
"\t// Create Cgroups for the pod and apply resource parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) {\n"
],
"file_path": "pkg/kubelet/kubelet.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/validation"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/term"
utilvalidation "k8s.io/kubernetes/pkg/util/validation"
"k8s.io/kubernetes/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
)
// Get a list of pods that have data directories.
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
podInfos, err := ioutil.ReadDir(kl.getPodsDir())
if err != nil {
return nil, err
}
pods := []types.UID{}
for i := range podInfos {
if podInfos[i].IsDir() {
pods = append(pods, types.UID(podInfos[i].Name()))
}
}
return pods, nil
}
// getActivePods returns non-terminal pods
func (kl *Kubelet) getActivePods() []*v1.Pod {
allPods := kl.podManager.GetPods()
activePods := kl.filterOutTerminatedPods(allPods)
return activePods
}
// makeDevices determines the devices for the given container.
// Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for
// (we only support one device per node).
// TODO: add support for more than 1 GPU after #28216.
func makeDevices(container *v1.Container) []kubecontainer.DeviceInfo {
nvidiaGPULimit := container.Resources.Limits.NvidiaGPU()
if nvidiaGPULimit.Value() != 0 {
return []kubecontainer.DeviceInfo{
{PathOnHost: "/dev/nvidia0", PathInContainer: "/dev/nvidia0", Permissions: "mrw"},
{PathOnHost: "/dev/nvidiactl", PathInContainer: "/dev/nvidiactl", Permissions: "mrw"},
{PathOnHost: "/dev/nvidia-uvm", PathInContainer: "/dev/nvidia-uvm", Permissions: "mrw"},
}
}
return nil
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
	// Kubernetes only mounts on /etc/hosts if:
	// - container does not use hostNetwork and
	// - container is not an infrastructure (pause) container and
	// - container is not already mounting on /etc/hosts and
	// - OS is not Windows
	// When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set.
mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.HostNetwork) && len(podIP) > 0 && runtime.GOOS != "windows"
glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
for _, mount := range container.VolumeMounts {
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
vol, ok := podVolumes[mount.Name]
if !ok {
glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount)
continue
}
relabelVolume := false
// If the volume supports SELinux and it has not been
// relabeled already and it is not a read-only volume,
// relabel it and mark it as labeled
if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
vol.SELinuxLabeled = true
relabelVolume = true
}
hostPath, err := volume.GetPath(vol.Mounter)
if err != nil {
return nil, err
}
if mount.SubPath != "" {
hostPath = filepath.Join(hostPath, mount.SubPath)
}
// Docker Volume Mounts fail on Windows if it is not of the form C:/
containerPath := mount.MountPath
if runtime.GOOS == "windows" {
if strings.HasPrefix(hostPath, "/") && !strings.Contains(hostPath, ":") {
hostPath = "c:" + hostPath
}
if strings.HasPrefix(containerPath, "/") && !strings.Contains(containerPath, ":") {
containerPath = "c:" + containerPath
}
}
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
ReadOnly: mount.ReadOnly,
SELinuxRelabel: relabelVolume,
})
}
if mountEtcHostsFile {
hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain)
if err != nil {
return nil, err
}
mounts = append(mounts, *hostsMount)
}
return mounts, nil
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with.
func makeHostsMount(podDir, podIP, hostName, hostDomainName string) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName); err != nil {
return nil, err
}
return &kubecontainer.Mount{
Name: "k8s-managed-etc-hosts",
ContainerPath: etcHostsPath,
HostPath: hostsFilePath,
ReadOnly: false,
SELinuxRelabel: true,
}, nil
}
// ensureHostsFile ensures that the given host file has an up-to-date ip, host
// name, and domain name.
func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error {
	if _, err := os.Stat(fileName); err == nil {
		glog.V(4).Infof("kubernetes-managed etc-hosts file exists. Will not be recreated: %q", fileName)
return nil
}
var buffer bytes.Buffer
buffer.WriteString("# Kubernetes-managed hosts file.\n")
buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
buffer.WriteString("fe00::0\tip6-localnet\n")
buffer.WriteString("fe00::0\tip6-mcastprefix\n")
buffer.WriteString("fe00::1\tip6-allnodes\n")
buffer.WriteString("fe00::2\tip6-allrouters\n")
if len(hostDomainName) > 0 {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
} else {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
}
return ioutil.WriteFile(fileName, buffer.Bytes(), 0644)
}
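// makePortMappings builds the runtime port mappings for a container's declared ports, generating a default port name when none is specified and skipping duplicate protocol-port names.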
func makePortMappings(container *v1.Container) (ports []kubecontainer.PortMapping) {
names := make(map[string]struct{})
for _, p := range container.Ports {
pm := kubecontainer.PortMapping{
HostPort: int(p.HostPort),
ContainerPort: int(p.ContainerPort),
Protocol: p.Protocol,
HostIP: p.HostIP,
}
// We need to create some default port name if it's not specified, since
// this is necessary for rkt.
// http://issue.k8s.io/7710
if p.Name == "" {
pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort)
} else {
pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name)
}
// Protect against exposing the same protocol-port more than once in a container.
if _, ok := names[pm.Name]; ok {
glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
continue
}
ports = append(ports, pm)
names[pm.Name] = struct{}{}
}
return
}
// truncatePodHostnameIfNeeded truncates the pod hostname if it's longer than 63 chars.
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
	// Cap hostname at 63 chars (the specification is 64 bytes, which is 63 chars plus the null terminating char).
const hostnameMaxLen = 63
if len(hostname) <= hostnameMaxLen {
return hostname, nil
}
truncated := hostname[:hostnameMaxLen]
glog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated)
// hostname should not end with '-' or '.'
truncated = strings.TrimRight(truncated, "-.")
if len(truncated) == 0 {
// This should never happen.
return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
}
return truncated, nil
}
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations or returns an error.
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
// TODO(vmarmol): Handle better.
clusterDomain := kl.clusterDomain
podAnnotations := pod.Annotations
if podAnnotations == nil {
podAnnotations = make(map[string]string)
}
hostname := pod.Name
if len(pod.Spec.Hostname) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
}
hostname = pod.Spec.Hostname
} else {
hostnameCandidate := podAnnotations[utilpod.PodHostnameAnnotation]
if len(utilvalidation.IsDNS1123Label(hostnameCandidate)) == 0 {
// use hostname annotation, if specified.
hostname = hostnameCandidate
}
}
hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
if err != nil {
return "", "", err
}
hostDomain := ""
if len(pod.Spec.Subdomain) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
}
hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
} else {
subdomainCandidate := pod.Annotations[utilpod.PodSubdomainAnnotation]
if len(utilvalidation.IsDNS1123Label(subdomainCandidate)) == 0 {
hostDomain = fmt.Sprintf("%s.%s.svc.%s", subdomainCandidate, pod.Namespace, clusterDomain)
}
}
return hostname, hostDomain, nil
}
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var err error
pcm := kl.containerManager.NewPodContainerManager()
_, podContainerName := pcm.GetPodContainerName(pod)
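	// Use the pod-level cgroup returned by the pod container manager as the cgroup
	// parent for every container launched in this pod.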
opts := &kubecontainer.RunContainerOptions{CgroupParent: podContainerName}
hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
opts.Hostname = hostname
podName := volumehelper.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = makePortMappings(container)
opts.Devices = makeDevices(container)
opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
if err != nil {
return nil, err
}
opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP)
if err != nil {
return nil, err
}
// Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and
// Docker for Windows has a bug where only directories can be mounted
if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" {
p := kl.getPodContainerDir(pod.UID, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %q: %v", p, err)
} else {
opts.PodContainerDir = p
}
}
opts.DNS, opts.DNSSearch, err = kl.GetClusterDNS(pod)
if err != nil {
return nil, err
}
// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
if kl.experimentalHostUserNamespaceDefaulting {
opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
}
return opts, nil
}
var masterServices = sets.NewString("kubernetes")
// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see.
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
var (
serviceMap = make(map[string]*v1.Service)
m = make(map[string]string)
)
// Get all service resources from the master (via a cache),
// and populate them into service environment variables.
if kl.serviceLister == nil {
// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
return m, nil
}
services, err := kl.serviceLister.List(labels.Everything())
if err != nil {
		return m, fmt.Errorf("failed to list services when setting up env vars")
}
// project the services in namespace ns onto the master services
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
if !v1.IsServiceIPSet(service) {
continue
}
serviceName := service.Name
switch service.Namespace {
		// when the pod's own namespace is also the master service namespace,
		// the pod should receive all the services in that namespace.
//
// ordering of the case clauses below enforces this
case ns:
serviceMap[serviceName] = service
case kl.masterServiceNamespace:
if masterServices.Has(serviceName) {
if _, exists := serviceMap[serviceName]; !exists {
serviceMap[serviceName] = service
}
}
}
}
mappedServices := []*v1.Service{}
for key := range serviceMap {
mappedServices = append(mappedServices, serviceMap[key])
}
for _, e := range envvars.FromServices(mappedServices) {
m[e.Name] = e.Value
}
return m, nil
}
// Make the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
var result []kubecontainer.EnvVar
// Note: These are added to the docker Config, but are not included in the checksum computed
// by dockertools.BuildDockerName(...). That way, we can still determine whether an
// v1.Container is already running by its hash. (We don't want to restart a container just
// because some service changed.)
//
// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
	// To avoid this users can: (1) wait between starting a service and starting the pods that need it; or (2) detect
// missing service env var and exit and be restarted; or (3) use DNS instead of env vars
// and keep trying to resolve the DNS name of the service (recommended).
serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace)
if err != nil {
return result, err
}
// Determine the final values of variables:
//
// 1. Determine the final value of each variable:
// a. If the variable's Value is set, expand the `$(var)` references to other
// variables in the .Value field; the sources of variables are the declared
// variables of the container and the service environment variables
// b. If a source is defined for an environment variable, resolve the source
// 2. Create the container's environment in the order variables are declared
// 3. Add remaining service environment vars
var (
tmpEnv = make(map[string]string)
configMaps = make(map[string]*v1.ConfigMap)
secrets = make(map[string]*v1.Secret)
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
)
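	// tmpEnv accumulates the variables already resolved for this container so that
	// later entries can reference earlier ones through $(VAR_NAME) expansion.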
for _, envVar := range container.Env {
// Accesses apiserver+Pods.
// So, the master may set service env vars, or kubelet may. In case both are doing
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
// env vars.
		// TODO: remove this next line once all platforms use apiserver+Pods.
delete(serviceEnv, envVar.Name)
runtimeVal := envVar.Value
if runtimeVal != "" {
// Step 1a: expand variable references
runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
} else if envVar.ValueFrom != nil {
// Step 1b: resolve alternate env var sources
switch {
case envVar.ValueFrom.FieldRef != nil:
runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
if err != nil {
return result, err
}
case envVar.ValueFrom.ResourceFieldRef != nil:
defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
if err != nil {
return result, err
}
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
if err != nil {
return result, err
}
case envVar.ValueFrom.ConfigMapKeyRef != nil:
name := envVar.ValueFrom.ConfigMapKeyRef.Name
key := envVar.ValueFrom.ConfigMapKeyRef.Key
configMap, ok := configMaps[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name)
if err != nil {
return result, err
}
configMaps[name] = configMap
}
runtimeVal, ok = configMap.Data[key]
if !ok {
return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
}
case envVar.ValueFrom.SecretKeyRef != nil:
name := envVar.ValueFrom.SecretKeyRef.Name
key := envVar.ValueFrom.SecretKeyRef.Key
secret, ok := secrets[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name)
if err != nil {
return result, err
}
secrets[name] = secret
}
runtimeValBytes, ok := secret.Data[key]
if !ok {
return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
}
runtimeVal = string(runtimeValBytes)
}
}
tmpEnv[envVar.Name] = runtimeVal
result = append(result, kubecontainer.EnvVar{Name: envVar.Name, Value: tmpEnv[envVar.Name]})
}
// Append remaining service env vars.
for k, v := range serviceEnv {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
return result, nil
}
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
if err != nil {
return "", err
}
switch internalFieldPath {
case "spec.nodeName":
return pod.Spec.NodeName, nil
case "spec.serviceAccountName":
return pod.Spec.ServiceAccountName, nil
case "status.podIP":
return podIP, nil
}
return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
}
// containerResourceRuntimeValue returns the value of the provided container resource
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
containerName := fs.ContainerName
if len(containerName) == 0 {
return fieldpath.ExtractContainerResourceValue(fs, container)
} else {
return fieldpath.ExtractResourceValueByContainerName(fs, pod, containerName)
}
}
// One of the following arguments must be non-nil: runningPod, status.
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
var p kubecontainer.Pod
if runningPod != nil {
p = *runningPod
} else if status != nil {
p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
}
// cache the pod cgroup Name for reducing the cpu resource limits of the pod cgroup once the pod is killed
pcm := kl.containerManager.NewPodContainerManager()
var podCgroup cm.CgroupName
	reduceCPULimits := true
if pod != nil {
podCgroup, _ = pcm.GetPodContainerName(pod)
} else {
// If the pod is nil then cgroup limit must have already
// been decreased earlier
		reduceCPULimits = false
}
// Call the container runtime KillPod method which stops all running containers of the pod
if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil {
return err
}
// At this point the pod might not completely free up cpu and memory resources.
// In such a case deleting the pod's cgroup might cause the pod's charges to be transferred
// to the parent cgroup. There might be various kinds of pod charges at this point.
// For example, any volume used by the pod that was backed by memory will have its
// pages charged to the pod cgroup until those volumes are removed by the kubelet.
// Hence we only reduce the cpu resource limits of the pod's cgroup
	// and defer the responsibility of destroying the pod's cgroup to the
// cleanup method and the housekeeping loop.
	if reduceCPULimits {
pcm.ReduceCPULimits(podCgroup)
}
return nil
}
// makePodDataDirs creates the directories for the pod's data.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
uid := pod.UID
if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
return nil
}
// returns whether the pod uses the host network namespace.
func podUsesHostNetwork(pod *v1.Pod) bool {
return pod.Spec.HostNetwork
}
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets.
// TODO: duplicate secrets are being retrieved multiple times and there
// is no cache. Creating and using a secret manager interface will make this
// easier to address.
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) {
pullSecrets := []v1.Secret{}
for _, secretRef := range pod.Spec.ImagePullSecrets {
secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name)
if err != nil {
glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
continue
}
pullSecrets = append(pullSecrets, *secret)
}
return pullSecrets, nil
}
// Returns true if pod is in the terminated state ("Failed" or "Succeeded").
func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool {
var status v1.PodStatus
// Check the cached pod status which was set after the last sync.
status, ok := kl.statusManager.GetPodStatus(pod.UID)
if !ok {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
status = pod.Status
}
if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
return true
}
return false
}
// filterOutTerminatedPods returns the given pods which the status manager
// does not consider failed or succeeded.
func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
var filteredPods []*v1.Pod
for _, p := range pods {
if kl.podIsTerminated(p) {
continue
}
filteredPods = append(filteredPods, p)
}
return filteredPods
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
podUIDs := make(map[types.UID]bool)
for _, pod := range pods {
podUIDs[pod.UID] = true
}
for _, pod := range mirrorPods {
podUIDs[pod.UID] = true
}
kl.statusManager.RemoveOrphanedStatuses(podUIDs)
}
// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories.
// NOTE: This function is executed by the main sync loop, so it
// should not contain any blocking calls.
func (kl *Kubelet) HandlePodCleanups() error {
// The kubelet lacks checkpointing, so we need to introspect the set of pods
// in the cgroup tree prior to inspecting the set of pods in our pod manager.
	// This ensures our view of the cgroup tree does not mistakenly observe pods
// that are added after the fact...
var (
cgroupPods map[types.UID]cm.CgroupName
err error
)
if kl.cgroupsPerQOS {
pcm := kl.containerManager.NewPodContainerManager()
cgroupPods, err = pcm.GetAllPodsFromCgroups()
if err != nil {
return fmt.Errorf("failed to get list of pods that still exist on cgroup mounts: %v", err)
}
}
allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
// Pod phase progresses monotonically. Once a pod has reached a final state,
// it should never leave regardless of the restart policy. The statuses
// of such pods should not be changed, and there is no need to sync them.
// TODO: the logic here does not handle two cases:
// 1. If the containers were removed immediately after they died, kubelet
// may fail to generate correct statuses, let alone filtering correctly.
// 2. If kubelet restarted before writing the terminated status for a pod
// to the apiserver, it could still restart the terminated pod (even
// though the pod was not considered terminated by the apiserver).
// These two conditions could be alleviated by checkpointing kubelet.
activePods := kl.filterOutTerminatedPods(allPods)
desiredPods := make(map[types.UID]empty)
for _, pod := range activePods {
desiredPods[pod.UID] = empty{}
}
// Stop the workers for no-longer existing pods.
// TODO: is here the best place to forget pod workers?
kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods)
kl.probeManager.CleanupPods(activePods)
runningPods, err := kl.runtimeCache.GetPods()
if err != nil {
glog.Errorf("Error listing containers: %#v", err)
return err
}
for _, pod := range runningPods {
if _, found := desiredPods[pod.ID]; !found {
kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}
}
}
kl.removeOrphanedPodStatuses(allPods, mirrorPods)
// Note that we just killed the unwanted pods. This may not have reflected
// in the cache. We need to bypass the cache to get the latest set of
// running pods to clean up the volumes.
// TODO: Evaluate the performance impact of bypassing the runtime cache.
runningPods, err = kl.containerRuntime.GetPods(false)
if err != nil {
glog.Errorf("Error listing containers: %#v", err)
return err
}
// Remove any orphaned volumes.
// Note that we pass all pods (including terminated pods) to the function,
// so that we don't remove volumes associated with terminated but not yet
// deleted pods.
err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
if err != nil {
// We want all cleanup tasks to be run even if one of them failed. So
// we just log an error here and continue other cleanup tasks.
// This also applies to the other clean up tasks.
glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
}
// Remove any orphaned mirror pods.
kl.podManager.DeleteOrphanedMirrorPods()
// Clear out any old bandwidth rules
err = kl.cleanupBandwidthLimits(allPods)
if err != nil {
glog.Errorf("Failed cleaning up bandwidth limits: %v", err)
}
// Remove any cgroups in the hierarchy for pods that should no longer exist
if kl.cgroupsPerQOS {
kl.cleanupOrphanedPodCgroups(cgroupPods, allPods, runningPods)
}
kl.backOff.GC()
return nil
}
// podKiller launches a goroutine to kill a pod received from the channel if
// another goroutine isn't already in action.
func (kl *Kubelet) podKiller() {
killing := sets.NewString()
resultCh := make(chan types.UID)
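	// killing tracks pods with an in-flight kill; resultCh reports completed kills so
	// the pod can be removed from the set and killed again later if it reappears.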
defer close(resultCh)
for {
select {
case podPair, ok := <-kl.podKillingCh:
if !ok {
return
}
runningPod := podPair.RunningPod
apiPod := podPair.APIPod
if killing.Has(string(runningPod.ID)) {
// The pod is already being killed.
break
}
killing.Insert(string(runningPod.ID))
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
defer func() {
ch <- runningPod.ID
}()
glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
err := kl.killPod(apiPod, runningPod, nil, nil)
if err != nil {
glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
}
}(apiPod, runningPod, resultCh)
case podID := <-resultCh:
killing.Delete(string(podID))
}
}
}
// hasHostPortConflicts detects pods with conflicting host ports.
func hasHostPortConflicts(pods []*v1.Pod) bool {
ports := sets.String{}
for _, pod := range pods {
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
}
return false
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName)
// if not found, check the init containers
if !found {
cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
}
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
}
lastState := cStatus.LastTerminationState
waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
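	// Resolve the container ID by precedence: an explicit request for the previous
	// instance, then a running container, then a terminated one, then the last
	// termination, and finally a waiting container (which has no logs yet).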
switch {
case previous:
if lastState.Terminated == nil {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case running != nil:
cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
case lastState.Terminated != nil:
cID = lastState.Terminated.ContainerID
case waiting != nil:
// output some info for the most common pending failures
switch reason := waiting.Reason; reason {
case images.ErrImagePull.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
case images.ErrImagePullBackOff.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
default:
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
}
default:
// unrecognized state
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
}
return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Pod workers periodically write status to statusManager. If status is not
// cached there, something is wrong (or kubelet just restarted and hasn't
// caught up yet). Just assume the pod is not ready yet.
name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
if err != nil {
return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err)
}
pod, ok := kl.GetPodByName(namespace, name)
if !ok {
return fmt.Errorf("pod %q cannot be found - no logs available", name)
}
podUID := pod.UID
if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
podUID = mirrorPod.UID
}
podStatus, found := kl.statusManager.GetPodStatus(podUID)
if !found {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
podStatus = pod.Status
}
// TODO: Consolidate the logic here with kuberuntime.GetContainerLogs, here we convert container name to containerID,
// but inside kuberuntime we convert container id back to container name and restart count.
// TODO: After separate container log lifecycle management, we should get log based on the existing log files
// instead of container status.
containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
if err != nil {
return err
}
// Do a zero-byte write to stdout before handing off to the container runtime.
// This ensures at least one Write call is made to the writer when copying starts,
// even if we then block waiting for log output from the container.
if _, err := stdout.Write([]byte{}); err != nil {
return err
}
return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
}
// GetPhase returns the phase of a pod given its container info.
// This func is exported to simplify integration with 3rd party kubelet
// integrations like kubernetes-mesos.
func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
initialized := 0
pendingInitialization := 0
failedInitialization := 0
for _, container := range spec.InitContainers {
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
pendingInitialization++
continue
}
switch {
case containerStatus.State.Running != nil:
pendingInitialization++
case containerStatus.State.Terminated != nil:
if containerStatus.State.Terminated.ExitCode == 0 {
initialized++
} else {
failedInitialization++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
if containerStatus.LastTerminationState.Terminated.ExitCode == 0 {
initialized++
} else {
failedInitialization++
}
} else {
pendingInitialization++
}
default:
pendingInitialization++
}
}
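	// Tally the states of the regular (non-init) containers.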
unknown := 0
running := 0
waiting := 0
stopped := 0
failed := 0
succeeded := 0
for _, container := range spec.Containers {
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
unknown++
continue
}
switch {
case containerStatus.State.Running != nil:
running++
case containerStatus.State.Terminated != nil:
stopped++
if containerStatus.State.Terminated.ExitCode == 0 {
succeeded++
} else {
failed++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
stopped++
} else {
waiting++
}
default:
unknown++
}
}
if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
return v1.PodFailed
}
switch {
case pendingInitialization > 0:
fallthrough
case waiting > 0:
glog.V(5).Infof("pod waiting > 0, pending")
		// One or more containers have not been started
return v1.PodPending
case running > 0 && unknown == 0:
// All containers have been started, and at least
// one container is running
return v1.PodRunning
case running == 0 && stopped > 0 && unknown == 0:
// All containers are terminated
if spec.RestartPolicy == v1.RestartPolicyAlways {
// All containers are in the process of restarting
return v1.PodRunning
}
if stopped == succeeded {
// RestartPolicy is not Always, and all
// containers are terminated in success
return v1.PodSucceeded
}
if spec.RestartPolicy == v1.RestartPolicyNever {
// RestartPolicy is Never, and all containers are
// terminated with at least one in failure
return v1.PodFailed
}
// RestartPolicy is OnFailure, and at least one in failure
// and in the process of restarting
return v1.PodRunning
default:
glog.V(5).Infof("pod default case, pending")
return v1.PodPending
}
}
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
// check if an internal module has requested the pod is evicted.
for _, podSyncHandler := range kl.PodSyncHandlers {
if result := podSyncHandler.ShouldEvict(pod); result.Evict {
return v1.PodStatus{
Phase: v1.PodFailed,
Reason: result.Reason,
Message: result.Message,
}
}
}
s := kl.convertStatusToAPIStatus(pod, podStatus)
// Assume info is ready to process
spec := &pod.Spec
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = GetPhase(spec, allStatus)
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
	// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus()
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
s.Conditions = append(s.Conditions, *oldPodScheduled)
}
v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
})
if !kl.standaloneMode {
hostIP, err := kl.getHostIPAnyWay()
if err != nil {
glog.V(4).Infof("Cannot get host IP: %v", err)
} else {
s.HostIP = hostIP.String()
if podUsesHostNetwork(pod) && s.PodIP == "" {
s.PodIP = hostIP.String()
}
}
}
return *s
}
// convertStatusToAPIStatus creates an api PodStatus for the given pod from
// the given internal pod status. It is purely transformative and does not
// alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
var apiPodStatus v1.PodStatus
apiPodStatus.PodIP = podStatus.IP
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
pod.Status.ContainerStatuses,
pod.Spec.Containers,
len(pod.Spec.InitContainers) > 0,
false,
)
apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
pod.Status.InitContainerStatuses,
pod.Spec.InitContainers,
len(pod.Spec.InitContainers) > 0,
true,
)
return &apiPodStatus
}
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus {
cid := cs.ID.String()
status := &v1.ContainerStatus{
Name: cs.Name,
RestartCount: int32(cs.RestartCount),
Image: cs.Image,
ImageID: cs.ImageID,
ContainerID: cid,
}
switch cs.State {
case kubecontainer.ContainerStateRunning:
status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)}
case kubecontainer.ContainerStateExited:
status.State.Terminated = &v1.ContainerStateTerminated{
ExitCode: int32(cs.ExitCode),
Reason: cs.Reason,
Message: cs.Message,
StartedAt: metav1.NewTime(cs.StartedAt),
FinishedAt: metav1.NewTime(cs.FinishedAt),
ContainerID: cid,
}
default:
status.State.Waiting = &v1.ContainerStateWaiting{}
}
return status
}
	// Fetch old container statuses from the old pod status.
oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*v1.ContainerStatus, len(containers))
defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"}}
if hasInitContainers {
defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "PodInitializing"}}
}
for _, container := range containers {
status := &v1.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
}
// Apply some values from the old statuses as the default values.
if oldStatus, found := oldStatuses[container.Name]; found {
status.RestartCount = oldStatus.RestartCount
status.LastTerminationState = oldStatus.LastTerminationState
}
statuses[container.Name] = status
}
	// Make the latest container status come first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
// Set container statuses according to the statuses seen in pod status
containerSeen := map[string]int{}
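	// At most two statuses per container are used: the newest becomes the current
	// state and the second newest becomes LastTerminationState; older ones are ignored.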
for _, cStatus := range podStatus.ContainerStatuses {
cName := cStatus.Name
if _, ok := statuses[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerSeen[cName] >= 2 {
continue
}
status := convertContainerStatus(cStatus)
if containerSeen[cName] == 0 {
statuses[cName] = status
} else {
statuses[cName].LastTerminationState = status.State
}
containerSeen[cName] = containerSeen[cName] + 1
}
// Handle the containers failed to be started, which should be in Waiting state.
for _, container := range containers {
if isInitContainer {
// If the init container is terminated with exit code 0, it won't be restarted.
// TODO(random-liu): Handle this in a cleaner way.
s := podStatus.FindContainerStatusByName(container.Name)
if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
continue
}
}
		// If a container should be restarted in the next syncPod, it is *Waiting*.
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
continue
}
status := statuses[container.Name]
reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
if !ok {
// In fact, we could also apply Waiting state here, but it is less informative,
// and the container will be restarted soon, so we prefer the original state here.
// Note that with the current implementation of ShouldContainerBeRestarted the original state here
// could be:
// * Waiting: There is no associated historical container and start failure reason record.
// * Terminated: The container is terminated.
continue
}
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
Reason: reason.Error(),
Message: message,
},
}
statuses[container.Name] = status
}
var containerStatuses []v1.ContainerStatus
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
// Sort the container statuses since clients of this interface expect the list
// of containers in a pod has a deterministic order.
if isInitContainer {
kubetypes.SortInitContainerStatuses(pod, containerStatuses)
} else {
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
}
return containerStatuses
}
// ServeLogs returns logs of the current machine.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
// TODO: whitelist logs we are willing to serve
kl.logServer.ServeHTTP(w, req)
}
// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return nil, err
}
podUID = kl.podManager.TranslatePodUID(podUID)
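	// If the UID belongs to a mirror pod, it has been translated to the UID of the
	// static pod that the container runtime actually knows about.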
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
return pod.FindContainerByName(containerName), nil
}
// Run a command in a container, returns the combined stdout, stderr as an array of bytes
func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
// TODO(timstclair): Pass a proper timeout value.
return kl.runner.RunInContainer(container.ID, cmd, 0)
}
// ExecInContainer executes a command in a container, connecting the supplied
// stdin/stdout/stderr to the command's IO streams.
func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize, timeout)
}
// AttachContainer uses the container runtime to attach the given streams to
// the given container.
func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize)
}
// PortForward connects to the pod's port and copies data between the port
// and the stream.
func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16, stream io.ReadWriteCloser) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return err
}
podUID = kl.podManager.TranslatePodUID(podUID)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return fmt.Errorf("pod not found (%q)", podFullName)
}
return streamingRuntime.PortForward(&pod, port, stream)
}
// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommand.Options) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the exec directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
default:
return nil, fmt.Errorf("container runtime does not support exec")
}
}
// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommand.Options) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the attach directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
// The TTY setting for attach must match the TTY setting in the initial container configuration,
// since whether the process is running in a TTY cannot be changed after it has started. We
// need the api.Pod to get the TTY status.
pod, found := kl.GetPodByFullName(podFullName)
if !found || (string(podUID) != "" && pod.UID != podUID) {
return nil, fmt.Errorf("pod %s not found", podFullName)
}
containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
if containerSpec == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
tty := containerSpec.TTY
return streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
default:
return nil, fmt.Errorf("container runtime does not support attach")
}
}
// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the attach directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return nil, err
}
podUID = kl.podManager.TranslatePodUID(podUID)
podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return nil, fmt.Errorf("pod not found (%q)", podFullName)
}
return streamingRuntime.GetPortForward(podName, podNamespace, podUID)
default:
return nil, fmt.Errorf("container runtime does not support port-forward")
}
}
// cleanupOrphanedPodCgroups removes the Cgroups of pods that should not be
// running and whose volumes have been cleaned up.
func (kl *Kubelet) cleanupOrphanedPodCgroups(
cgroupPods map[types.UID]cm.CgroupName,
pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
// Add all running and existing terminated pods to a set allPods
allPods := sets.NewString()
for _, pod := range pods {
allPods.Insert(string(pod.UID))
}
for _, pod := range runningPods {
allPods.Insert(string(pod.ID))
}
pcm := kl.containerManager.NewPodContainerManager()
// Iterate over all the found pods to verify if they should be running
for uid, val := range cgroupPods {
if allPods.Has(string(uid)) {
continue
}
		// If volumes have not been unmounted/detached, do not delete the cgroup so that the charge does not get transferred to the parent.
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up, Skipping cgroups deletion: %v", uid)
continue
}
glog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid)
// Destroy all cgroups of pod that should not be running,
// by first killing all the attached processes to these cgroups.
// We ignore errors thrown by the method, as the housekeeping loop would
// again try to delete these unwanted pod cgroups
go pcm.Destroy(val)
}
return nil
}
// enableHostUserNamespace determines if the host user namespace should be used by the container runtime.
// Returns true if the pod is using a host PID, IPC, or network namespace, the pod is using a non-namespaced
// capability, the pod contains a privileged container, or the pod has a host path volume.
//
// NOTE: if a container shares any namespace with another container it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
if hasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) {
return true
}
return false
}
// hasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func hasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
*c.SecurityContext.Privileged {
return true
}
}
return false
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
func hasNonNamespacedCapability(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
for _, cap := range c.SecurityContext.Capabilities.Add {
if cap == "MKNOD" || cap == "SYS_TIME" || cap == "SYS_MODULE" {
return true
}
}
}
}
return false
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *v1.Pod) bool {
for _, v := range pod.Spec.Volumes {
if v.HostPath != nil {
return true
}
}
return false
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
func hasHostNamespace(pod *v1.Pod) bool {
if pod.Spec.SecurityContext == nil {
return false
}
return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID
}
// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
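			// Resolve the claim to its bound PersistentVolume and check whether that
			// volume is backed by a HostPath.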
pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)
if err != nil {
glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
continue
}
if pvc != nil {
referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName)
if err != nil {
glog.Warningf("unable to retrieve pvc %s - %v", pvc.Spec.VolumeName, err)
continue
}
if referencedVolume != nil && referencedVolume.Spec.HostPath != nil {
return true
}
}
}
}
return false
}
| pkg/kubelet/kubelet_pods.go | 1 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.013125995174050331,
0.0012077264254912734,
0.00016397815488744527,
0.00027254552696831524,
0.002166930353268981
] |
{
"id": 0,
"code_window": [
"\t\treturn syncErr\n",
"\t}\n",
"\n",
"\t// If the network plugin is not ready, only start the pod if it uses the host network\n",
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !podUsesHostNetwork(pod) {\n",
"\t\treturn fmt.Errorf(\"network is not ready: %v\", rs)\n",
"\t}\n",
"\n",
"\t// Create Cgroups for the pod and apply resource parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) {\n"
],
"file_path": "pkg/kubelet/kubelet.go",
"type": "replace",
"edit_start_line_idx": 1000
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
"go_test",
"cgo_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"fake_shaper.go",
"interfaces.go",
"linux.go",
"utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/resource:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/sets:go_default_library",
"//vendor:github.com/golang/glog",
],
)
go_test(
name = "go_default_test",
srcs = [
"linux_test.go",
"utils_test.go",
],
library = "go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/util/exec:go_default_library",
],
)
| pkg/util/bandwidth/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.00017734307039063424,
0.00017559781554155052,
0.00017390707216691226,
0.0001758365979185328,
0.0000011396044783396064
] |
{
"id": 0,
"code_window": [
"\t\treturn syncErr\n",
"\t}\n",
"\n",
"\t// If the network plugin is not ready, only start the pod if it uses the host network\n",
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !podUsesHostNetwork(pod) {\n",
"\t\treturn fmt.Errorf(\"network is not ready: %v\", rs)\n",
"\t}\n",
"\n",
"\t// Create Cgroups for the pod and apply resource parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) {\n"
],
"file_path": "pkg/kubelet/kubelet.go",
"type": "replace",
"edit_start_line_idx": 1000
} | {
"foo": "bar"
}
| pkg/kubelet/dockertools/fixtures/seccomp/test | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.00017232589016202837,
0.00017232589016202837,
0.00017232589016202837,
0.00017232589016202837,
0
] |
{
"id": 0,
"code_window": [
"\t\treturn syncErr\n",
"\t}\n",
"\n",
"\t// If the network plugin is not ready, only start the pod if it uses the host network\n",
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !podUsesHostNetwork(pod) {\n",
"\t\treturn fmt.Errorf(\"network is not ready: %v\", rs)\n",
"\t}\n",
"\n",
"\t// Create Cgroups for the pod and apply resource parameters\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif rs := kl.runtimeState.networkErrors(); len(rs) != 0 && !kubecontainer.IsHostNetworkPod(pod) {\n"
],
"file_path": "pkg/kubelet/kubelet.go",
"type": "replace",
"edit_start_line_idx": 1000
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.kubernetes.pkg.runtime.schema;
import "k8s.io/kubernetes/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "schema";
| pkg/runtime/schema/generated.proto | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.00017818583000916988,
0.00017657682474236935,
0.00017353387374896556,
0.0001780107559170574,
0.000002152874458261067
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// returns whether the pod uses the host network namespace.\n",
"func podUsesHostNetwork(pod *v1.Pod) bool {\n",
"\treturn pod.Spec.HostNetwork\n",
"}\n",
"\n",
"// getPullSecretsForPod inspects the Pod and retrieves the referenced pull\n",
"// secrets.\n",
"// TODO: duplicate secrets are being retrieved multiple times and there\n",
"// is no cache. Creating and using a secret manager interface will make this\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 594
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/validation"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/term"
utilvalidation "k8s.io/kubernetes/pkg/util/validation"
"k8s.io/kubernetes/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
)
// Get a list of pods that have data directories.
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
podInfos, err := ioutil.ReadDir(kl.getPodsDir())
if err != nil {
return nil, err
}
pods := []types.UID{}
for i := range podInfos {
if podInfos[i].IsDir() {
pods = append(pods, types.UID(podInfos[i].Name()))
}
}
return pods, nil
}
// getActivePods returns non-terminal pods
func (kl *Kubelet) getActivePods() []*v1.Pod {
allPods := kl.podManager.GetPods()
activePods := kl.filterOutTerminatedPods(allPods)
return activePods
}
// makeDevices determines the devices for the given container.
// Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for
// (we only support one device per node).
// TODO: add support for more than 1 GPU after #28216.
func makeDevices(container *v1.Container) []kubecontainer.DeviceInfo {
nvidiaGPULimit := container.Resources.Limits.NvidiaGPU()
if nvidiaGPULimit.Value() != 0 {
return []kubecontainer.DeviceInfo{
{PathOnHost: "/dev/nvidia0", PathInContainer: "/dev/nvidia0", Permissions: "mrw"},
{PathOnHost: "/dev/nvidiactl", PathInContainer: "/dev/nvidiactl", Permissions: "mrw"},
{PathOnHost: "/dev/nvidia-uvm", PathInContainer: "/dev/nvidia-uvm", Permissions: "mrw"},
}
}
return nil
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
// Kubernetes only mounts on /etc/hosts if :
// - container does not use hostNetwork and
// - container is not an infrastructure(pause) container
// - container is not already mounting on /etc/hosts
// When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set.
// OS is not Windows
mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.HostNetwork) && len(podIP) > 0 && runtime.GOOS != "windows"
glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
for _, mount := range container.VolumeMounts {
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
vol, ok := podVolumes[mount.Name]
if !ok {
glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount)
continue
}
relabelVolume := false
// If the volume supports SELinux and it has not been
// relabeled already and it is not a read-only volume,
// relabel it and mark it as labeled
if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
vol.SELinuxLabeled = true
relabelVolume = true
}
hostPath, err := volume.GetPath(vol.Mounter)
if err != nil {
return nil, err
}
if mount.SubPath != "" {
hostPath = filepath.Join(hostPath, mount.SubPath)
}
// Docker Volume Mounts fail on Windows if it is not of the form C:/
containerPath := mount.MountPath
if runtime.GOOS == "windows" {
if strings.HasPrefix(hostPath, "/") && !strings.Contains(hostPath, ":") {
hostPath = "c:" + hostPath
}
if strings.HasPrefix(containerPath, "/") && !strings.Contains(containerPath, ":") {
containerPath = "c:" + containerPath
}
}
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
ReadOnly: mount.ReadOnly,
SELinuxRelabel: relabelVolume,
})
}
if mountEtcHostsFile {
hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain)
if err != nil {
return nil, err
}
mounts = append(mounts, *hostsMount)
}
return mounts, nil
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with.
func makeHostsMount(podDir, podIP, hostName, hostDomainName string) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName); err != nil {
return nil, err
}
return &kubecontainer.Mount{
Name: "k8s-managed-etc-hosts",
ContainerPath: etcHostsPath,
HostPath: hostsFilePath,
ReadOnly: false,
SELinuxRelabel: true,
}, nil
}
// ensureHostsFile ensures that the given host file has an up-to-date ip, host
// name, and domain name.
func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error {
if _, err := os.Stat(fileName); os.IsExist(err) {
glog.V(4).Infof("kubernetes-managed etc-hosts file exits. Will not be recreated: %q", fileName)
return nil
}
var buffer bytes.Buffer
buffer.WriteString("# Kubernetes-managed hosts file.\n")
buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
buffer.WriteString("fe00::0\tip6-localnet\n")
buffer.WriteString("fe00::0\tip6-mcastprefix\n")
buffer.WriteString("fe00::1\tip6-allnodes\n")
buffer.WriteString("fe00::2\tip6-allrouters\n")
if len(hostDomainName) > 0 {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
} else {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
}
return ioutil.WriteFile(fileName, buffer.Bytes(), 0644)
}
func makePortMappings(container *v1.Container) (ports []kubecontainer.PortMapping) {
names := make(map[string]struct{})
for _, p := range container.Ports {
pm := kubecontainer.PortMapping{
HostPort: int(p.HostPort),
ContainerPort: int(p.ContainerPort),
Protocol: p.Protocol,
HostIP: p.HostIP,
}
// We need to create some default port name if it's not specified, since
// this is necessary for rkt.
// http://issue.k8s.io/7710
if p.Name == "" {
pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort)
} else {
pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name)
}
// Protect against exposing the same protocol-port more than once in a container.
if _, ok := names[pm.Name]; ok {
glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
continue
}
ports = append(ports, pm)
names[pm.Name] = struct{}{}
}
return
}
// truncatePodHostnameIfNeeded truncates the pod hostname if it's longer than 63 chars.
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
	// Cap hostname at 63 chars (the specification allows 64 bytes, which is 63 chars plus the null terminator).
const hostnameMaxLen = 63
if len(hostname) <= hostnameMaxLen {
return hostname, nil
}
truncated := hostname[:hostnameMaxLen]
glog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated)
// hostname should not end with '-' or '.'
truncated = strings.TrimRight(truncated, "-.")
if len(truncated) == 0 {
// This should never happen.
return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
}
return truncated, nil
}
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given the pod's spec and annotations, or returns an error.
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
// TODO(vmarmol): Handle better.
clusterDomain := kl.clusterDomain
podAnnotations := pod.Annotations
if podAnnotations == nil {
podAnnotations = make(map[string]string)
}
hostname := pod.Name
if len(pod.Spec.Hostname) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
}
hostname = pod.Spec.Hostname
} else {
hostnameCandidate := podAnnotations[utilpod.PodHostnameAnnotation]
if len(utilvalidation.IsDNS1123Label(hostnameCandidate)) == 0 {
// use hostname annotation, if specified.
hostname = hostnameCandidate
}
}
hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
if err != nil {
return "", "", err
}
hostDomain := ""
if len(pod.Spec.Subdomain) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
}
hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
} else {
subdomainCandidate := pod.Annotations[utilpod.PodSubdomainAnnotation]
if len(utilvalidation.IsDNS1123Label(subdomainCandidate)) == 0 {
hostDomain = fmt.Sprintf("%s.%s.svc.%s", subdomainCandidate, pod.Namespace, clusterDomain)
}
}
return hostname, hostDomain, nil
}
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var err error
pcm := kl.containerManager.NewPodContainerManager()
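	// The pod-level cgroup (if any) becomes the cgroup parent for the container.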
_, podContainerName := pcm.GetPodContainerName(pod)
opts := &kubecontainer.RunContainerOptions{CgroupParent: podContainerName}
hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
opts.Hostname = hostname
podName := volumehelper.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = makePortMappings(container)
opts.Devices = makeDevices(container)
opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
if err != nil {
return nil, err
}
opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP)
if err != nil {
return nil, err
}
// Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and
// Docker for Windows has a bug where only directories can be mounted
if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" {
p := kl.getPodContainerDir(pod.UID, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %q: %v", p, err)
} else {
opts.PodContainerDir = p
}
}
opts.DNS, opts.DNSSearch, err = kl.GetClusterDNS(pod)
if err != nil {
return nil, err
}
// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
if kl.experimentalHostUserNamespaceDefaulting {
opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
}
return opts, nil
}
var masterServices = sets.NewString("kubernetes")
// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see.
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
var (
serviceMap = make(map[string]*v1.Service)
m = make(map[string]string)
)
// Get all service resources from the master (via a cache),
// and populate them into service environment variables.
if kl.serviceLister == nil {
// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
return m, nil
}
services, err := kl.serviceLister.List(labels.Everything())
if err != nil {
return m, fmt.Errorf("failed to list services when setting up env vars.")
}
// project the services in namespace ns onto the master services
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
if !v1.IsServiceIPSet(service) {
continue
}
serviceName := service.Name
switch service.Namespace {
		// for the case where the master service namespace is the namespace the pod
		// is in, the pod should receive all the services in that namespace.
//
// ordering of the case clauses below enforces this
case ns:
serviceMap[serviceName] = service
case kl.masterServiceNamespace:
if masterServices.Has(serviceName) {
if _, exists := serviceMap[serviceName]; !exists {
serviceMap[serviceName] = service
}
}
}
}
mappedServices := []*v1.Service{}
for key := range serviceMap {
mappedServices = append(mappedServices, serviceMap[key])
}
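	// Convert the selected services into their corresponding environment variables.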
for _, e := range envvars.FromServices(mappedServices) {
m[e.Name] = e.Value
}
return m, nil
}
// Make the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
var result []kubecontainer.EnvVar
// Note: These are added to the docker Config, but are not included in the checksum computed
	// by dockertools.BuildDockerName(...). That way, we can still determine whether a
	// v1.Container is already running by its hash. (We don't want to restart a container just
// because some service changed.)
//
// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
	// To avoid this, users can: (1) wait some time between starting a service and starting the pods
	// that use it; or (2) detect a missing service env var, exit, and be restarted; or (3) use DNS instead of env vars
// and keep trying to resolve the DNS name of the service (recommended).
serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace)
if err != nil {
return result, err
}
// Determine the final values of variables:
//
// 1. Determine the final value of each variable:
// a. If the variable's Value is set, expand the `$(var)` references to other
// variables in the .Value field; the sources of variables are the declared
// variables of the container and the service environment variables
// b. If a source is defined for an environment variable, resolve the source
// 2. Create the container's environment in the order variables are declared
// 3. Add remaining service environment vars
var (
tmpEnv = make(map[string]string)
configMaps = make(map[string]*v1.ConfigMap)
secrets = make(map[string]*v1.Secret)
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
)
for _, envVar := range container.Env {
// Accesses apiserver+Pods.
// So, the master may set service env vars, or kubelet may. In case both are doing
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
// env vars.
		// TODO: remove this next line once all platforms use apiserver+Pods.
delete(serviceEnv, envVar.Name)
runtimeVal := envVar.Value
if runtimeVal != "" {
// Step 1a: expand variable references
runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
} else if envVar.ValueFrom != nil {
// Step 1b: resolve alternate env var sources
switch {
case envVar.ValueFrom.FieldRef != nil:
runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
if err != nil {
return result, err
}
case envVar.ValueFrom.ResourceFieldRef != nil:
defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
if err != nil {
return result, err
}
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
if err != nil {
return result, err
}
case envVar.ValueFrom.ConfigMapKeyRef != nil:
name := envVar.ValueFrom.ConfigMapKeyRef.Name
key := envVar.ValueFrom.ConfigMapKeyRef.Key
configMap, ok := configMaps[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name)
if err != nil {
return result, err
}
configMaps[name] = configMap
}
runtimeVal, ok = configMap.Data[key]
if !ok {
return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
}
case envVar.ValueFrom.SecretKeyRef != nil:
name := envVar.ValueFrom.SecretKeyRef.Name
key := envVar.ValueFrom.SecretKeyRef.Key
secret, ok := secrets[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name)
if err != nil {
return result, err
}
secrets[name] = secret
}
runtimeValBytes, ok := secret.Data[key]
if !ok {
return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
}
runtimeVal = string(runtimeValBytes)
}
}
tmpEnv[envVar.Name] = runtimeVal
result = append(result, kubecontainer.EnvVar{Name: envVar.Name, Value: tmpEnv[envVar.Name]})
}
// Append remaining service env vars.
for k, v := range serviceEnv {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
return result, nil
}
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
if err != nil {
return "", err
}
switch internalFieldPath {
case "spec.nodeName":
return pod.Spec.NodeName, nil
case "spec.serviceAccountName":
return pod.Spec.ServiceAccountName, nil
case "status.podIP":
return podIP, nil
}
return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
}
// containerResourceRuntimeValue returns the value of the provided container resource
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
containerName := fs.ContainerName
if len(containerName) == 0 {
return fieldpath.ExtractContainerResourceValue(fs, container)
} else {
return fieldpath.ExtractResourceValueByContainerName(fs, pod, containerName)
}
}
// One of the following arguments must be non-nil: runningPod, status.
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
var p kubecontainer.Pod
if runningPod != nil {
p = *runningPod
} else if status != nil {
p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
}
// cache the pod cgroup Name for reducing the cpu resource limits of the pod cgroup once the pod is killed
pcm := kl.containerManager.NewPodContainerManager()
var podCgroup cm.CgroupName
	reduceCPULimits := true
if pod != nil {
podCgroup, _ = pcm.GetPodContainerName(pod)
} else {
// If the pod is nil then cgroup limit must have already
// been decreased earlier
		reduceCPULimits = false
}
// Call the container runtime KillPod method which stops all running containers of the pod
if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil {
return err
}
// At this point the pod might not completely free up cpu and memory resources.
// In such a case deleting the pod's cgroup might cause the pod's charges to be transferred
// to the parent cgroup. There might be various kinds of pod charges at this point.
// For example, any volume used by the pod that was backed by memory will have its
// pages charged to the pod cgroup until those volumes are removed by the kubelet.
// Hence we only reduce the cpu resource limits of the pod's cgroup
// and defer the responsibilty of destroying the pod's cgroup to the
// cleanup method and the housekeeping loop.
	if reduceCPULimits {
pcm.ReduceCPULimits(podCgroup)
}
return nil
}
// makePodDataDirs creates the directories for the pod's data.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
uid := pod.UID
if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
return nil
}
// returns whether the pod uses the host network namespace.
func podUsesHostNetwork(pod *v1.Pod) bool {
return pod.Spec.HostNetwork
}
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets.
// TODO: duplicate secrets are being retrieved multiple times and there
// is no cache. Creating and using a secret manager interface will make this
// easier to address.
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) {
pullSecrets := []v1.Secret{}
for _, secretRef := range pod.Spec.ImagePullSecrets {
secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name)
if err != nil {
glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
continue
}
pullSecrets = append(pullSecrets, *secret)
}
return pullSecrets, nil
}
// Returns true if pod is in the terminated state ("Failed" or "Succeeded").
func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool {
var status v1.PodStatus
// Check the cached pod status which was set after the last sync.
status, ok := kl.statusManager.GetPodStatus(pod.UID)
if !ok {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
status = pod.Status
}
if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
return true
}
return false
}
// filterOutTerminatedPods returns the given pods which the status manager
// does not consider failed or succeeded.
func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
var filteredPods []*v1.Pod
for _, p := range pods {
if kl.podIsTerminated(p) {
continue
}
filteredPods = append(filteredPods, p)
}
return filteredPods
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
podUIDs := make(map[types.UID]bool)
for _, pod := range pods {
podUIDs[pod.UID] = true
}
for _, pod := range mirrorPods {
podUIDs[pod.UID] = true
}
kl.statusManager.RemoveOrphanedStatuses(podUIDs)
}
// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories.
// NOTE: This function is executed by the main sync loop, so it
// should not contain any blocking calls.
func (kl *Kubelet) HandlePodCleanups() error {
// The kubelet lacks checkpointing, so we need to introspect the set of pods
// in the cgroup tree prior to inspecting the set of pods in our pod manager.
	// This ensures our view of the cgroup tree does not mistakenly observe pods
// that are added after the fact...
var (
cgroupPods map[types.UID]cm.CgroupName
err error
)
if kl.cgroupsPerQOS {
pcm := kl.containerManager.NewPodContainerManager()
cgroupPods, err = pcm.GetAllPodsFromCgroups()
if err != nil {
return fmt.Errorf("failed to get list of pods that still exist on cgroup mounts: %v", err)
}
}
allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
// Pod phase progresses monotonically. Once a pod has reached a final state,
// it should never leave regardless of the restart policy. The statuses
// of such pods should not be changed, and there is no need to sync them.
// TODO: the logic here does not handle two cases:
// 1. If the containers were removed immediately after they died, kubelet
// may fail to generate correct statuses, let alone filtering correctly.
// 2. If kubelet restarted before writing the terminated status for a pod
// to the apiserver, it could still restart the terminated pod (even
// though the pod was not considered terminated by the apiserver).
// These two conditions could be alleviated by checkpointing kubelet.
activePods := kl.filterOutTerminatedPods(allPods)
desiredPods := make(map[types.UID]empty)
for _, pod := range activePods {
desiredPods[pod.UID] = empty{}
}
// Stop the workers for no-longer existing pods.
// TODO: is here the best place to forget pod workers?
kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods)
kl.probeManager.CleanupPods(activePods)
runningPods, err := kl.runtimeCache.GetPods()
if err != nil {
glog.Errorf("Error listing containers: %#v", err)
return err
}
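	// Kill any running pod that is no longer desired; the podKiller worker performs the actual termination.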
for _, pod := range runningPods {
if _, found := desiredPods[pod.ID]; !found {
kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}
}
}
kl.removeOrphanedPodStatuses(allPods, mirrorPods)
// Note that we just killed the unwanted pods. This may not have reflected
// in the cache. We need to bypass the cache to get the latest set of
// running pods to clean up the volumes.
// TODO: Evaluate the performance impact of bypassing the runtime cache.
runningPods, err = kl.containerRuntime.GetPods(false)
if err != nil {
glog.Errorf("Error listing containers: %#v", err)
return err
}
// Remove any orphaned volumes.
// Note that we pass all pods (including terminated pods) to the function,
// so that we don't remove volumes associated with terminated but not yet
// deleted pods.
err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
if err != nil {
// We want all cleanup tasks to be run even if one of them failed. So
// we just log an error here and continue other cleanup tasks.
// This also applies to the other clean up tasks.
glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
}
// Remove any orphaned mirror pods.
kl.podManager.DeleteOrphanedMirrorPods()
// Clear out any old bandwidth rules
err = kl.cleanupBandwidthLimits(allPods)
if err != nil {
glog.Errorf("Failed cleaning up bandwidth limits: %v", err)
}
// Remove any cgroups in the hierarchy for pods that should no longer exist
if kl.cgroupsPerQOS {
kl.cleanupOrphanedPodCgroups(cgroupPods, allPods, runningPods)
}
kl.backOff.GC()
return nil
}
// podKiller launches a goroutine to kill a pod received from the channel if
// another goroutine isn't already in action.
func (kl *Kubelet) podKiller() {
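	// killing tracks pods that already have an in-flight kill goroutine so a second one is not started.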
killing := sets.NewString()
resultCh := make(chan types.UID)
defer close(resultCh)
for {
select {
case podPair, ok := <-kl.podKillingCh:
if !ok {
return
}
runningPod := podPair.RunningPod
apiPod := podPair.APIPod
if killing.Has(string(runningPod.ID)) {
// The pod is already being killed.
break
}
killing.Insert(string(runningPod.ID))
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
defer func() {
ch <- runningPod.ID
}()
glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
err := kl.killPod(apiPod, runningPod, nil, nil)
if err != nil {
glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
}
}(apiPod, runningPod, resultCh)
case podID := <-resultCh:
killing.Delete(string(podID))
}
}
}
// hasHostPortConflicts detects pods with conflicting host ports.
func hasHostPortConflicts(pods []*v1.Pod) bool {
ports := sets.String{}
for _, pod := range pods {
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
}
return false
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName)
// if not found, check the init containers
if !found {
cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
}
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
}
lastState := cStatus.LastTerminationState
waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
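	// Choose which container instance to fetch logs from: the previous terminated instance if requested,
	// otherwise prefer a running container over earlier terminations.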
switch {
case previous:
if lastState.Terminated == nil {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case running != nil:
cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
case lastState.Terminated != nil:
cID = lastState.Terminated.ContainerID
case waiting != nil:
// output some info for the most common pending failures
switch reason := waiting.Reason; reason {
case images.ErrImagePull.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
case images.ErrImagePullBackOff.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
default:
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
}
default:
// unrecognized state
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
}
return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Pod workers periodically write status to statusManager. If status is not
// cached there, something is wrong (or kubelet just restarted and hasn't
// caught up yet). Just assume the pod is not ready yet.
name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
if err != nil {
return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err)
}
pod, ok := kl.GetPodByName(namespace, name)
if !ok {
return fmt.Errorf("pod %q cannot be found - no logs available", name)
}
podUID := pod.UID
if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
podUID = mirrorPod.UID
}
podStatus, found := kl.statusManager.GetPodStatus(podUID)
if !found {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
podStatus = pod.Status
}
// TODO: Consolidate the logic here with kuberuntime.GetContainerLogs, here we convert container name to containerID,
// but inside kuberuntime we convert container id back to container name and restart count.
// TODO: After separate container log lifecycle management, we should get log based on the existing log files
// instead of container status.
containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
if err != nil {
return err
}
// Do a zero-byte write to stdout before handing off to the container runtime.
// This ensures at least one Write call is made to the writer when copying starts,
// even if we then block waiting for log output from the container.
if _, err := stdout.Write([]byte{}); err != nil {
return err
}
return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
}
// GetPhase returns the phase of a pod given its container info.
// This func is exported to simplify integration with third-party kubelet
// implementations such as kubernetes-mesos.
func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
initialized := 0
pendingInitialization := 0
failedInitialization := 0
for _, container := range spec.InitContainers {
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
pendingInitialization++
continue
}
switch {
case containerStatus.State.Running != nil:
pendingInitialization++
case containerStatus.State.Terminated != nil:
if containerStatus.State.Terminated.ExitCode == 0 {
initialized++
} else {
failedInitialization++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
if containerStatus.LastTerminationState.Terminated.ExitCode == 0 {
initialized++
} else {
failedInitialization++
}
} else {
pendingInitialization++
}
default:
pendingInitialization++
}
}
unknown := 0
running := 0
waiting := 0
stopped := 0
failed := 0
succeeded := 0
for _, container := range spec.Containers {
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
unknown++
continue
}
switch {
case containerStatus.State.Running != nil:
running++
case containerStatus.State.Terminated != nil:
stopped++
if containerStatus.State.Terminated.ExitCode == 0 {
succeeded++
} else {
failed++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
stopped++
} else {
waiting++
}
default:
unknown++
}
}
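	// If any init container failed and the pod will never be restarted, the whole pod has failed.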
if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
return v1.PodFailed
}
switch {
case pendingInitialization > 0:
fallthrough
case waiting > 0:
glog.V(5).Infof("pod waiting > 0, pending")
		// One or more containers have not been started
return v1.PodPending
case running > 0 && unknown == 0:
// All containers have been started, and at least
// one container is running
return v1.PodRunning
case running == 0 && stopped > 0 && unknown == 0:
// All containers are terminated
if spec.RestartPolicy == v1.RestartPolicyAlways {
// All containers are in the process of restarting
return v1.PodRunning
}
if stopped == succeeded {
// RestartPolicy is not Always, and all
// containers are terminated in success
return v1.PodSucceeded
}
if spec.RestartPolicy == v1.RestartPolicyNever {
// RestartPolicy is Never, and all containers are
// terminated with at least one in failure
return v1.PodFailed
}
// RestartPolicy is OnFailure, and at least one in failure
// and in the process of restarting
return v1.PodRunning
default:
glog.V(5).Infof("pod default case, pending")
return v1.PodPending
}
}
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
// check if an internal module has requested the pod is evicted.
for _, podSyncHandler := range kl.PodSyncHandlers {
if result := podSyncHandler.ShouldEvict(pod); result.Evict {
return v1.PodStatus{
Phase: v1.PodFailed,
Reason: result.Reason,
Message: result.Message,
}
}
}
s := kl.convertStatusToAPIStatus(pod, podStatus)
// Assume info is ready to process
spec := &pod.Spec
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = GetPhase(spec, allStatus)
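	// Ask the probe manager to update container readiness in the status based on the latest probe results.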
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
	// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus()
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
s.Conditions = append(s.Conditions, *oldPodScheduled)
}
v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
})
if !kl.standaloneMode {
hostIP, err := kl.getHostIPAnyWay()
if err != nil {
glog.V(4).Infof("Cannot get host IP: %v", err)
} else {
s.HostIP = hostIP.String()
if podUsesHostNetwork(pod) && s.PodIP == "" {
s.PodIP = hostIP.String()
}
}
}
return *s
}
// convertStatusToAPIStatus creates an api PodStatus for the given pod from
// the given internal pod status. It is purely transformative and does not
// alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
var apiPodStatus v1.PodStatus
apiPodStatus.PodIP = podStatus.IP
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
pod.Status.ContainerStatuses,
pod.Spec.Containers,
len(pod.Spec.InitContainers) > 0,
false,
)
apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
pod.Status.InitContainerStatuses,
pod.Spec.InitContainers,
len(pod.Spec.InitContainers) > 0,
true,
)
return &apiPodStatus
}
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus {
cid := cs.ID.String()
status := &v1.ContainerStatus{
Name: cs.Name,
RestartCount: int32(cs.RestartCount),
Image: cs.Image,
ImageID: cs.ImageID,
ContainerID: cid,
}
switch cs.State {
case kubecontainer.ContainerStateRunning:
status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)}
case kubecontainer.ContainerStateExited:
status.State.Terminated = &v1.ContainerStateTerminated{
ExitCode: int32(cs.ExitCode),
Reason: cs.Reason,
Message: cs.Message,
StartedAt: metav1.NewTime(cs.StartedAt),
FinishedAt: metav1.NewTime(cs.FinishedAt),
ContainerID: cid,
}
default:
status.State.Waiting = &v1.ContainerStateWaiting{}
}
return status
}
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*v1.ContainerStatus, len(containers))
defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"}}
if hasInitContainers {
defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "PodInitializing"}}
}
for _, container := range containers {
status := &v1.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
}
// Apply some values from the old statuses as the default values.
if oldStatus, found := oldStatuses[container.Name]; found {
status.RestartCount = oldStatus.RestartCount
status.LastTerminationState = oldStatus.LastTerminationState
}
statuses[container.Name] = status
}
	// Make the latest container status come first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
// Set container statuses according to the statuses seen in pod status
containerSeen := map[string]int{}
for _, cStatus := range podStatus.ContainerStatuses {
cName := cStatus.Name
if _, ok := statuses[cName]; !ok {
// This would also ignore the infra container.
continue
}
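		// Only the two most recent statuses are used: the newest becomes the current state,
		// the next most recent becomes the last termination state.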
if containerSeen[cName] >= 2 {
continue
}
status := convertContainerStatus(cStatus)
if containerSeen[cName] == 0 {
statuses[cName] = status
} else {
statuses[cName].LastTerminationState = status.State
}
containerSeen[cName] = containerSeen[cName] + 1
}
	// Handle the containers that failed to start; they should be in the Waiting state.
for _, container := range containers {
if isInitContainer {
// If the init container is terminated with exit code 0, it won't be restarted.
// TODO(random-liu): Handle this in a cleaner way.
s := podStatus.FindContainerStatusByName(container.Name)
if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
continue
}
}
		// If a container should be restarted in the next syncpod, it is *Waiting*.
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
continue
}
status := statuses[container.Name]
reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
if !ok {
// In fact, we could also apply Waiting state here, but it is less informative,
// and the container will be restarted soon, so we prefer the original state here.
// Note that with the current implementation of ShouldContainerBeRestarted the original state here
// could be:
// * Waiting: There is no associated historical container and start failure reason record.
// * Terminated: The container is terminated.
continue
}
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
Reason: reason.Error(),
Message: message,
},
}
statuses[container.Name] = status
}
var containerStatuses []v1.ContainerStatus
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
// Sort the container statuses since clients of this interface expect the list
	// of containers in a pod to have a deterministic order.
if isInitContainer {
kubetypes.SortInitContainerStatuses(pod, containerStatuses)
} else {
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
}
return containerStatuses
}
// Returns logs of current machine.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
// TODO: whitelist logs we are willing to serve
kl.logServer.ServeHTTP(w, req)
}
// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return nil, err
}
podUID = kl.podManager.TranslatePodUID(podUID)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
return pod.FindContainerByName(containerName), nil
}
// Run a command in a container, returns the combined stdout, stderr as an array of bytes
func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
// TODO(timstclair): Pass a proper timeout value.
return kl.runner.RunInContainer(container.ID, cmd, 0)
}
// ExecInContainer executes a command in a container, connecting the supplied
// stdin/stdout/stderr to the command's IO streams.
func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize, timeout)
}
// AttachContainer uses the container runtime to attach the given streams to
// the given container.
func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize)
}
// PortForward connects to the pod's port and copies data between the port
// and the stream.
func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16, stream io.ReadWriteCloser) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return err
}
podUID = kl.podManager.TranslatePodUID(podUID)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return fmt.Errorf("pod not found (%q)", podFullName)
}
return streamingRuntime.PortForward(&pod, port, stream)
}
// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommand.Options) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the exec directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
default:
return nil, fmt.Errorf("container runtime does not support exec")
}
}
// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommand.Options) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the attach directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
// The TTY setting for attach must match the TTY setting in the initial container configuration,
// since whether the process is running in a TTY cannot be changed after it has started. We
// need the api.Pod to get the TTY status.
pod, found := kl.GetPodByFullName(podFullName)
if !found || (string(podUID) != "" && pod.UID != podUID) {
return nil, fmt.Errorf("pod %s not found", podFullName)
}
containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
if containerSpec == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
tty := containerSpec.TTY
return streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
default:
return nil, fmt.Errorf("container runtime does not support attach")
}
}
// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
		// Kubelet will serve the port-forward directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return nil, err
}
podUID = kl.podManager.TranslatePodUID(podUID)
podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return nil, fmt.Errorf("pod not found (%q)", podFullName)
}
return streamingRuntime.GetPortForward(podName, podNamespace, podUID)
default:
return nil, fmt.Errorf("container runtime does not support port-forward")
}
}
// cleanupOrphanedPodCgroups removes the Cgroups of pods that should not be
// running and whose volumes have been cleaned up.
func (kl *Kubelet) cleanupOrphanedPodCgroups(
cgroupPods map[types.UID]cm.CgroupName,
pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
// Add all running and existing terminated pods to a set allPods
allPods := sets.NewString()
for _, pod := range pods {
allPods.Insert(string(pod.UID))
}
for _, pod := range runningPods {
allPods.Insert(string(pod.ID))
}
pcm := kl.containerManager.NewPodContainerManager()
// Iterate over all the found pods to verify if they should be running
for uid, val := range cgroupPods {
if allPods.Has(string(uid)) {
continue
}
		// If volumes have not been unmounted/detached, do not delete the cgroup, so that the charge does not get transferred to the parent.
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up, Skipping cgroups deletion: %v", uid)
continue
}
glog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid)
		// Destroy all cgroups of pods that should not be running,
		// by first killing all the processes attached to these cgroups.
		// We ignore errors returned by the method, as the housekeeping loop would
		// retry deleting these unwanted pod cgroups.
go pcm.Destroy(val)
}
return nil
}
// enableHostUserNamespace determines if the host user namespace should be used by the container runtime.
// Returns true if the pod is using a host PID, IPC, or network namespace, the pod is using a non-namespaced
// capability, the pod contains a privileged container, or the pod has a host path volume.
//
// NOTE: if a container shares any namespace with another container, it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
if hasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) {
return true
}
return false
}
// hasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func hasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
*c.SecurityContext.Privileged {
return true
}
}
return false
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
func hasNonNamespacedCapability(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
for _, cap := range c.SecurityContext.Capabilities.Add {
if cap == "MKNOD" || cap == "SYS_TIME" || cap == "SYS_MODULE" {
return true
}
}
}
}
return false
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *v1.Pod) bool {
for _, v := range pod.Spec.Volumes {
if v.HostPath != nil {
return true
}
}
return false
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
func hasHostNamespace(pod *v1.Pod) bool {
if pod.Spec.SecurityContext == nil {
return false
}
return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID
}
// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)
if err != nil {
glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
continue
}
if pvc != nil {
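				// Look up the bound PersistentVolume to see whether it is backed by a HostPath.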
referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName)
if err != nil {
glog.Warningf("unable to retrieve pvc %s - %v", pvc.Spec.VolumeName, err)
continue
}
if referencedVolume != nil && referencedVolume.Spec.HostPath != nil {
return true
}
}
}
}
return false
}
| pkg/kubelet/kubelet_pods.go | 1 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.9991200566291809,
0.021732261404395103,
0.00016611700993962586,
0.0008374682511202991,
0.1307878941297531
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// returns whether the pod uses the host network namespace.\n",
"func podUsesHostNetwork(pod *v1.Pod) bool {\n",
"\treturn pod.Spec.HostNetwork\n",
"}\n",
"\n",
"// getPullSecretsForPod inspects the Pod and retrieves the referenced pull\n",
"// secrets.\n",
"// TODO: duplicate secrets are being retrieved multiple times and there\n",
"// is no cache. Creating and using a secret manager interface will make this\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 594
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:openapi-gen=true
package abac
import (
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
)
// Policy contains a single ABAC policy rule
type Policy struct {
metav1.TypeMeta
// Spec describes the policy rule
Spec PolicySpec
}
// PolicySpec contains the attributes for a policy rule
type PolicySpec struct {
// User is the username this rule applies to.
// Either user or group is required to match the request.
// "*" matches all users.
User string
// Group is the group this rule applies to.
// Either user or group is required to match the request.
// "*" matches all groups.
Group string
// Readonly matches readonly requests when true, and all requests when false
Readonly bool
// APIGroup is the name of an API group. APIGroup, Resource, and Namespace are required to match resource requests.
// "*" matches all API groups
APIGroup string
// Resource is the name of a resource. APIGroup, Resource, and Namespace are required to match resource requests.
// "*" matches all resources
Resource string
// Namespace is the name of a namespace. APIGroup, Resource, and Namespace are required to match resource requests.
// "*" matches all namespaces (including unnamespaced requests)
Namespace string
// NonResourcePath matches non-resource request paths.
// "*" matches all paths
// "/foo/*" matches all subpaths of foo
NonResourcePath string
// TODO: "expires" string in RFC3339 format.
// TODO: want a way to allow some users to restart containers of a pod but
// not delete or modify it.
	// TODO: want a way to allow a controller to create a pod based only on
// certain podTemplates.
}
| pkg/apis/abac/types.go | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.0013832122785970569,
0.0004033868317492306,
0.00016770247020758688,
0.00019042764324694872,
0.00041074142791330814
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// returns whether the pod uses the host network namespace.\n",
"func podUsesHostNetwork(pod *v1.Pod) bool {\n",
"\treturn pod.Spec.HostNetwork\n",
"}\n",
"\n",
"// getPullSecretsForPod inspects the Pod and retrieves the referenced pull\n",
"// secrets.\n",
"// TODO: duplicate secrets are being retrieved multiple times and there\n",
"// is no cache. Creating and using a secret manager interface will make this\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 594
} | #cloud-config
---
write-files:
- path: /etc/conf.d/nfs
permissions: '0644'
content: |
OPTS_RPC_MOUNTD=""
- path: /opt/bin/wupiao
permissions: '0755'
content: |
#!/bin/bash
# [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen
[ -n "$1" ] && \
until curl -o /dev/null -sIf http://${1}; do \
sleep 1 && echo .;
done;
exit $?
hostname: master
coreos:
etcd2:
name: master
listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001
advertise-client-urls: http://$private_ipv4:2379,http://$private_ipv4:4001
initial-cluster-token: k8s_etcd
listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001
initial-advertise-peer-urls: http://$private_ipv4:2380
initial-cluster: master=http://$private_ipv4:2380
initial-cluster-state: new
fleet:
metadata: "role=master"
units:
- name: etcd2.service
command: start
- name: generate-serviceaccount-key.service
command: start
content: |
[Unit]
Description=Generate service-account key file
[Service]
ExecStartPre=-/usr/bin/mkdir -p /opt/bin
ExecStart=/bin/openssl genrsa -out /opt/bin/kube-serviceaccount.key 2048 2>/dev/null
RemainAfterExit=yes
Type=oneshot
- name: setup-network-environment.service
command: start
content: |
[Unit]
Description=Setup Network Environment
Documentation=https://github.com/kelseyhightower/setup-network-environment
Requires=network-online.target
After=network-online.target
[Service]
ExecStartPre=-/usr/bin/mkdir -p /opt/bin
ExecStartPre=/usr/bin/curl -L -o /opt/bin/setup-network-environment -z /opt/bin/setup-network-environment https://github.com/kelseyhightower/setup-network-environment/releases/download/v1.0.0/setup-network-environment
ExecStartPre=/usr/bin/chmod +x /opt/bin/setup-network-environment
ExecStart=/opt/bin/setup-network-environment
RemainAfterExit=yes
Type=oneshot
- name: fleet.service
command: start
- name: flanneld.service
command: start
drop-ins:
- name: 50-network-config.conf
content: |
[Unit]
Requires=etcd2.service
[Service]
ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}'
- name: docker.service
command: start
- name: kube-apiserver.service
command: start
content: |
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
Requires=setup-network-environment.service etcd2.service generate-serviceaccount-key.service
After=setup-network-environment.service etcd2.service generate-serviceaccount-key.service
[Service]
EnvironmentFile=/etc/network-environment
ExecStartPre=-/usr/bin/mkdir -p /opt/bin
ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-apiserver -z /opt/bin/kube-apiserver https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-apiserver
ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver
ExecStartPre=/opt/bin/wupiao 127.0.0.1:2379/v2/machines
ExecStart=/opt/bin/kube-apiserver \
--service-account-key-file=/opt/bin/kube-serviceaccount.key \
--service-account-lookup=false \
--admission-control=NamespaceLifecycle,NamespaceAutoProvision,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota \
--runtime-config=api/v1 \
--allow-privileged=true \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--kubelet-https=true \
--secure-port=6443 \
--service-cluster-ip-range=10.100.0.0/16 \
--etcd-servers=http://127.0.0.1:2379 \
--public-address-override=${DEFAULT_IPV4} \
--logtostderr=true
Restart=always
RestartSec=10
- name: kube-controller-manager.service
command: start
content: |
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
Requires=kube-apiserver.service
After=kube-apiserver.service
[Service]
ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-controller-manager -z /opt/bin/kube-controller-manager https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-controller-manager
ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager
ExecStart=/opt/bin/kube-controller-manager \
--service-account-private-key-file=/opt/bin/kube-serviceaccount.key \
--master=127.0.0.1:8080 \
--logtostderr=true
Restart=always
RestartSec=10
- name: kube-scheduler.service
command: start
content: |
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
Requires=kube-apiserver.service
After=kube-apiserver.service
[Service]
ExecStartPre=/usr/bin/curl -L -o /opt/bin/kube-scheduler -z /opt/bin/kube-scheduler https://storage.googleapis.com/kubernetes-release/release/v1.1.2/bin/linux/amd64/kube-scheduler
ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler
ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080
Restart=always
RestartSec=10
update:
group: alpha
reboot-strategy: off
| test/fixtures/doc-yaml/getting-started-guides/coreos/cloud-configs/master.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.0001749684161040932,
0.00016933397273533046,
0.00016526466060895473,
0.00016895293083507568,
0.000002274949110869784
] |
{
"id": 2,
"code_window": [
"\t}\n",
"\treturn nil\n",
"}\n",
"\n",
"// returns whether the pod uses the host network namespace.\n",
"func podUsesHostNetwork(pod *v1.Pod) bool {\n",
"\treturn pod.Spec.HostNetwork\n",
"}\n",
"\n",
"// getPullSecretsForPod inspects the Pod and retrieves the referenced pull\n",
"// secrets.\n",
"// TODO: duplicate secrets are being retrieved multiple times and there\n",
"// is no cache. Creating and using a secret manager interface will make this\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 594
} | // Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil defines interfaces for handling Protocol Buffer objects.
package pbutil
import "github.com/coreos/pkg/capnslog"
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags")
)
type Marshaler interface {
Marshal() (data []byte, err error)
}
type Unmarshaler interface {
Unmarshal(data []byte) error
}
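// MustMarshal marshals m and panics if marshaling fails; it is intended for
// messages that are expected to always marshal successfully.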
func MustMarshal(m Marshaler) []byte {
d, err := m.Marshal()
if err != nil {
plog.Panicf("marshal should never fail (%v)", err)
}
return d
}
func MustUnmarshal(um Unmarshaler, data []byte) {
if err := um.Unmarshal(data); err != nil {
plog.Panicf("unmarshal should never fail (%v)", err)
}
}
func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
if err := um.Unmarshal(data); err != nil {
return false
}
return true
}
func GetBool(v *bool) (vv bool, set bool) {
if v == nil {
return false, false
}
return *v, true
}
func Boolp(b bool) *bool { return &b }
| vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.00029301168979145586,
0.00019467156380414963,
0.0001643223367864266,
0.00017237701104022563,
0.000043631567677948624
] |
{
"id": 3,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\tglog.V(4).Infof(\"Cannot get host IP: %v\", err)\n",
"\t\t} else {\n",
"\t\t\ts.HostIP = hostIP.String()\n",
"\t\t\tif podUsesHostNetwork(pod) && s.PodIP == \"\" {\n",
"\t\t\t\ts.PodIP = hostIP.String()\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif kubecontainer.IsHostNetworkPod(pod) && s.PodIP == \"\" {\n"
],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 1068
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/validation"
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/envvars"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/term"
utilvalidation "k8s.io/kubernetes/pkg/util/validation"
"k8s.io/kubernetes/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/third_party/forked/golang/expansion"
)
// Get a list of pods that have data directories.
func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
podInfos, err := ioutil.ReadDir(kl.getPodsDir())
if err != nil {
return nil, err
}
pods := []types.UID{}
for i := range podInfos {
if podInfos[i].IsDir() {
pods = append(pods, types.UID(podInfos[i].Name()))
}
}
return pods, nil
}
// getActivePods returns non-terminal pods
func (kl *Kubelet) getActivePods() []*v1.Pod {
allPods := kl.podManager.GetPods()
activePods := kl.filterOutTerminatedPods(allPods)
return activePods
}
// makeDevices determines the devices for the given container.
// Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for
// (we only support one device per node).
// TODO: add support for more than 1 GPU after #28216.
func makeDevices(container *v1.Container) []kubecontainer.DeviceInfo {
nvidiaGPULimit := container.Resources.Limits.NvidiaGPU()
if nvidiaGPULimit.Value() != 0 {
return []kubecontainer.DeviceInfo{
{PathOnHost: "/dev/nvidia0", PathInContainer: "/dev/nvidia0", Permissions: "mrw"},
{PathOnHost: "/dev/nvidiactl", PathInContainer: "/dev/nvidiactl", Permissions: "mrw"},
{PathOnHost: "/dev/nvidia-uvm", PathInContainer: "/dev/nvidia-uvm", Permissions: "mrw"},
}
}
return nil
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
	// Kubernetes mounts the managed /etc/hosts file only if:
	// - the container does not use the host network,
	// - the container is not the infrastructure (pause) container,
	// - the container is not already mounting on /etc/hosts, and
	// - the OS is not Windows.
	// When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set.
mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.HostNetwork) && len(podIP) > 0 && runtime.GOOS != "windows"
glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
for _, mount := range container.VolumeMounts {
mountEtcHostsFile = mountEtcHostsFile && (mount.MountPath != etcHostsPath)
vol, ok := podVolumes[mount.Name]
if !ok {
glog.Warningf("Mount cannot be satisfied for container %q, because the volume is missing: %q", container.Name, mount)
continue
}
relabelVolume := false
// If the volume supports SELinux and it has not been
// relabeled already and it is not a read-only volume,
// relabel it and mark it as labeled
if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
vol.SELinuxLabeled = true
relabelVolume = true
}
hostPath, err := volume.GetPath(vol.Mounter)
if err != nil {
return nil, err
}
if mount.SubPath != "" {
hostPath = filepath.Join(hostPath, mount.SubPath)
}
// Docker Volume Mounts fail on Windows if it is not of the form C:/
containerPath := mount.MountPath
if runtime.GOOS == "windows" {
if strings.HasPrefix(hostPath, "/") && !strings.Contains(hostPath, ":") {
hostPath = "c:" + hostPath
}
if strings.HasPrefix(containerPath, "/") && !strings.Contains(containerPath, ":") {
containerPath = "c:" + containerPath
}
}
mounts = append(mounts, kubecontainer.Mount{
Name: mount.Name,
ContainerPath: containerPath,
HostPath: hostPath,
ReadOnly: mount.ReadOnly,
SELinuxRelabel: relabelVolume,
})
}
if mountEtcHostsFile {
hostsMount, err := makeHostsMount(podDir, podIP, hostName, hostDomain)
if err != nil {
return nil, err
}
mounts = append(mounts, *hostsMount)
}
return mounts, nil
}
// makeHostsMount makes the mountpoint for the hosts file that the containers
// in a pod are injected with.
func makeHostsMount(podDir, podIP, hostName, hostDomainName string) (*kubecontainer.Mount, error) {
hostsFilePath := path.Join(podDir, "etc-hosts")
if err := ensureHostsFile(hostsFilePath, podIP, hostName, hostDomainName); err != nil {
return nil, err
}
return &kubecontainer.Mount{
Name: "k8s-managed-etc-hosts",
ContainerPath: etcHostsPath,
HostPath: hostsFilePath,
ReadOnly: false,
SELinuxRelabel: true,
}, nil
}
// ensureHostsFile ensures that the given host file has an up-to-date ip, host
// name, and domain name.
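// For example, given hostIP "10.0.0.5", hostName "web-0" and hostDomainName
// "web.default.svc.cluster.local", the generated file ends with the tab-separated line
// "10.0.0.5	web-0.web.default.svc.cluster.local	web-0".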
func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error {
	if _, err := os.Stat(fileName); err == nil {
glog.V(4).Infof("kubernetes-managed etc-hosts file exits. Will not be recreated: %q", fileName)
return nil
}
var buffer bytes.Buffer
buffer.WriteString("# Kubernetes-managed hosts file.\n")
buffer.WriteString("127.0.0.1\tlocalhost\n") // ipv4 localhost
buffer.WriteString("::1\tlocalhost ip6-localhost ip6-loopback\n") // ipv6 localhost
buffer.WriteString("fe00::0\tip6-localnet\n")
buffer.WriteString("fe00::0\tip6-mcastprefix\n")
buffer.WriteString("fe00::1\tip6-allnodes\n")
buffer.WriteString("fe00::2\tip6-allrouters\n")
if len(hostDomainName) > 0 {
buffer.WriteString(fmt.Sprintf("%s\t%s.%s\t%s\n", hostIP, hostName, hostDomainName, hostName))
} else {
buffer.WriteString(fmt.Sprintf("%s\t%s\n", hostIP, hostName))
}
return ioutil.WriteFile(fileName, buffer.Bytes(), 0644)
}
func makePortMappings(container *v1.Container) (ports []kubecontainer.PortMapping) {
names := make(map[string]struct{})
for _, p := range container.Ports {
pm := kubecontainer.PortMapping{
HostPort: int(p.HostPort),
ContainerPort: int(p.ContainerPort),
Protocol: p.Protocol,
HostIP: p.HostIP,
}
// We need to create some default port name if it's not specified, since
// this is necessary for rkt.
// http://issue.k8s.io/7710
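		// For example, an unnamed TCP port 8080 in a container named "web" becomes
		// "web-TCP:8080", while a port explicitly named "http" becomes "web-http".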
if p.Name == "" {
pm.Name = fmt.Sprintf("%s-%s:%d", container.Name, p.Protocol, p.ContainerPort)
} else {
pm.Name = fmt.Sprintf("%s-%s", container.Name, p.Name)
}
// Protect against exposing the same protocol-port more than once in a container.
if _, ok := names[pm.Name]; ok {
glog.Warningf("Port name conflicted, %q is defined more than once", pm.Name)
continue
}
ports = append(ports, pm)
names[pm.Name] = struct{}{}
}
return
}
// truncatePodHostnameIfNeeded truncates the pod hostname if it's longer than 63 chars.
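// For example, a 70-character hostname is cut to its first 63 characters, and any trailing
// '-' or '.' characters are then stripped so the result remains a valid DNS label.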
func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
	// Cap the hostname at 63 chars (the specification is 64 bytes, which is 63 chars plus the null terminating char).
const hostnameMaxLen = 63
if len(hostname) <= hostnameMaxLen {
return hostname, nil
}
truncated := hostname[:hostnameMaxLen]
glog.Errorf("hostname for pod:%q was longer than %d. Truncated hostname to :%q", podName, hostnameMaxLen, truncated)
// hostname should not end with '-' or '.'
truncated = strings.TrimRight(truncated, "-.")
if len(truncated) == 0 {
// This should never happen.
return "", fmt.Errorf("hostname for pod %q was invalid: %q", podName, hostname)
}
return truncated, nil
}
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations, or returns an error.
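// For example, a pod named "web-0" with spec.subdomain "web" in namespace "default" and
// cluster domain "cluster.local" gets hostname "web-0" and domain
// "web.default.svc.cluster.local".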
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
// TODO(vmarmol): Handle better.
clusterDomain := kl.clusterDomain
podAnnotations := pod.Annotations
if podAnnotations == nil {
podAnnotations = make(map[string]string)
}
hostname := pod.Name
if len(pod.Spec.Hostname) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Hostname); len(msgs) != 0 {
return "", "", fmt.Errorf("Pod Hostname %q is not a valid DNS label: %s", pod.Spec.Hostname, strings.Join(msgs, ";"))
}
hostname = pod.Spec.Hostname
} else {
hostnameCandidate := podAnnotations[utilpod.PodHostnameAnnotation]
if len(utilvalidation.IsDNS1123Label(hostnameCandidate)) == 0 {
// use hostname annotation, if specified.
hostname = hostnameCandidate
}
}
hostname, err := truncatePodHostnameIfNeeded(pod.Name, hostname)
if err != nil {
return "", "", err
}
hostDomain := ""
if len(pod.Spec.Subdomain) > 0 {
if msgs := utilvalidation.IsDNS1123Label(pod.Spec.Subdomain); len(msgs) != 0 {
return "", "", fmt.Errorf("Pod Subdomain %q is not a valid DNS label: %s", pod.Spec.Subdomain, strings.Join(msgs, ";"))
}
hostDomain = fmt.Sprintf("%s.%s.svc.%s", pod.Spec.Subdomain, pod.Namespace, clusterDomain)
} else {
subdomainCandidate := pod.Annotations[utilpod.PodSubdomainAnnotation]
if len(utilvalidation.IsDNS1123Label(subdomainCandidate)) == 0 {
hostDomain = fmt.Sprintf("%s.%s.svc.%s", subdomainCandidate, pod.Namespace, clusterDomain)
}
}
return hostname, hostDomain, nil
}
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var err error
pcm := kl.containerManager.NewPodContainerManager()
_, podContainerName := pcm.GetPodContainerName(pod)
opts := &kubecontainer.RunContainerOptions{CgroupParent: podContainerName}
hostname, hostDomainName, err := kl.GeneratePodHostNameAndDomain(pod)
if err != nil {
return nil, err
}
opts.Hostname = hostname
podName := volumehelper.GetUniquePodName(pod)
volumes := kl.volumeManager.GetMountedVolumesForPod(podName)
opts.PortMappings = makePortMappings(container)
opts.Devices = makeDevices(container)
opts.Mounts, err = makeMounts(pod, kl.getPodDir(pod.UID), container, hostname, hostDomainName, podIP, volumes)
if err != nil {
return nil, err
}
opts.Envs, err = kl.makeEnvironmentVariables(pod, container, podIP)
if err != nil {
return nil, err
}
// Disabling adding TerminationMessagePath on Windows as these files would be mounted as docker volume and
// Docker for Windows has a bug where only directories can be mounted
if len(container.TerminationMessagePath) != 0 && runtime.GOOS != "windows" {
p := kl.getPodContainerDir(pod.UID, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %q: %v", p, err)
} else {
opts.PodContainerDir = p
}
}
opts.DNS, opts.DNSSearch, err = kl.GetClusterDNS(pod)
if err != nil {
return nil, err
}
// only do this check if the experimental behavior is enabled, otherwise allow it to default to false
if kl.experimentalHostUserNamespaceDefaulting {
opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod)
}
return opts, nil
}
var masterServices = sets.NewString("kubernetes")
// getServiceEnvVarMap makes a map[string]string of env vars for services a
// pod in namespace ns should see.
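// For example, a "kubernetes" service with cluster IP 10.0.0.1 and port 443 contributes
// KUBERNETES_SERVICE_HOST=10.0.0.1 and KUBERNETES_SERVICE_PORT=443 (among other
// docker-link-style variables) to the pod's environment.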
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
var (
serviceMap = make(map[string]*v1.Service)
m = make(map[string]string)
)
// Get all service resources from the master (via a cache),
// and populate them into service environment variables.
if kl.serviceLister == nil {
// Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
return m, nil
}
services, err := kl.serviceLister.List(labels.Everything())
if err != nil {
return m, fmt.Errorf("failed to list services when setting up env vars.")
}
// project the services in namespace ns onto the master services
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
if !v1.IsServiceIPSet(service) {
continue
}
serviceName := service.Name
switch service.Namespace {
		// for the case where the master service namespace is the namespace the pod
		// is in, the pod should receive all the services in the namespace.
//
// ordering of the case clauses below enforces this
case ns:
serviceMap[serviceName] = service
case kl.masterServiceNamespace:
if masterServices.Has(serviceName) {
if _, exists := serviceMap[serviceName]; !exists {
serviceMap[serviceName] = service
}
}
}
}
mappedServices := []*v1.Service{}
for key := range serviceMap {
mappedServices = append(mappedServices, serviceMap[key])
}
for _, e := range envvars.FromServices(mappedServices) {
m[e.Name] = e.Value
}
return m, nil
}
// Make the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
var result []kubecontainer.EnvVar
// Note: These are added to the docker Config, but are not included in the checksum computed
// by dockertools.BuildDockerName(...). That way, we can still determine whether an
// v1.Container is already running by its hash. (We don't want to restart a container just
// because some service changed.)
//
// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
	// To avoid this users can: (1) wait between starting a service and starting their pods; or (2) detect
// missing service env var and exit and be restarted; or (3) use DNS instead of env vars
// and keep trying to resolve the DNS name of the service (recommended).
serviceEnv, err := kl.getServiceEnvVarMap(pod.Namespace)
if err != nil {
return result, err
}
// Determine the final values of variables:
//
// 1. Determine the final value of each variable:
// a. If the variable's Value is set, expand the `$(var)` references to other
// variables in the .Value field; the sources of variables are the declared
// variables of the container and the service environment variables
// b. If a source is defined for an environment variable, resolve the source
// 2. Create the container's environment in the order variables are declared
// 3. Add remaining service environment vars
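	// For example, an env var declared as NAME="$(BAR)-suffix" resolves to "x-suffix" when an
	// earlier-declared container variable (or service env var) BAR has the value "x"; if BAR
	// cannot be resolved, the reference is left as the literal string "$(BAR)".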
var (
tmpEnv = make(map[string]string)
configMaps = make(map[string]*v1.ConfigMap)
secrets = make(map[string]*v1.Secret)
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
)
for _, envVar := range container.Env {
// Accesses apiserver+Pods.
// So, the master may set service env vars, or kubelet may. In case both are doing
// it, we delete the key from the kubelet-generated ones so we don't have duplicate
// env vars.
		// TODO: remove this next line once all platforms use apiserver+Pods.
delete(serviceEnv, envVar.Name)
runtimeVal := envVar.Value
if runtimeVal != "" {
// Step 1a: expand variable references
runtimeVal = expansion.Expand(runtimeVal, mappingFunc)
} else if envVar.ValueFrom != nil {
// Step 1b: resolve alternate env var sources
switch {
case envVar.ValueFrom.FieldRef != nil:
runtimeVal, err = kl.podFieldSelectorRuntimeValue(envVar.ValueFrom.FieldRef, pod, podIP)
if err != nil {
return result, err
}
case envVar.ValueFrom.ResourceFieldRef != nil:
defaultedPod, defaultedContainer, err := kl.defaultPodLimitsForDownwardApi(pod, container)
if err != nil {
return result, err
}
runtimeVal, err = containerResourceRuntimeValue(envVar.ValueFrom.ResourceFieldRef, defaultedPod, defaultedContainer)
if err != nil {
return result, err
}
case envVar.ValueFrom.ConfigMapKeyRef != nil:
name := envVar.ValueFrom.ConfigMapKeyRef.Name
key := envVar.ValueFrom.ConfigMapKeyRef.Key
configMap, ok := configMaps[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("Couldn't get configMap %v/%v, no kubeClient defined", pod.Namespace, name)
}
configMap, err = kl.kubeClient.Core().ConfigMaps(pod.Namespace).Get(name)
if err != nil {
return result, err
}
configMaps[name] = configMap
}
runtimeVal, ok = configMap.Data[key]
if !ok {
return result, fmt.Errorf("Couldn't find key %v in ConfigMap %v/%v", key, pod.Namespace, name)
}
case envVar.ValueFrom.SecretKeyRef != nil:
name := envVar.ValueFrom.SecretKeyRef.Name
key := envVar.ValueFrom.SecretKeyRef.Key
secret, ok := secrets[name]
if !ok {
if kl.kubeClient == nil {
return result, fmt.Errorf("Couldn't get secret %v/%v, no kubeClient defined", pod.Namespace, name)
}
secret, err = kl.kubeClient.Core().Secrets(pod.Namespace).Get(name)
if err != nil {
return result, err
}
secrets[name] = secret
}
runtimeValBytes, ok := secret.Data[key]
if !ok {
return result, fmt.Errorf("Couldn't find key %v in Secret %v/%v", key, pod.Namespace, name)
}
runtimeVal = string(runtimeValBytes)
}
}
tmpEnv[envVar.Name] = runtimeVal
result = append(result, kubecontainer.EnvVar{Name: envVar.Name, Value: tmpEnv[envVar.Name]})
}
// Append remaining service env vars.
for k, v := range serviceEnv {
result = append(result, kubecontainer.EnvVar{Name: k, Value: v})
}
return result, nil
}
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
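// For example, a downward-API selector with fieldPath "spec.nodeName" resolves to the name of
// the node the pod is scheduled on, and "status.podIP" resolves to the pod IP passed in.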
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
if err != nil {
return "", err
}
switch internalFieldPath {
case "spec.nodeName":
return pod.Spec.NodeName, nil
case "spec.serviceAccountName":
return pod.Spec.ServiceAccountName, nil
case "status.podIP":
return podIP, nil
}
return fieldpath.ExtractFieldPathAsString(pod, internalFieldPath)
}
// containerResourceRuntimeValue returns the value of the provided container resource
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
containerName := fs.ContainerName
if len(containerName) == 0 {
return fieldpath.ExtractContainerResourceValue(fs, container)
} else {
return fieldpath.ExtractResourceValueByContainerName(fs, pod, containerName)
}
}
// One of the following arguments must be non-nil: runningPod, status.
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
var p kubecontainer.Pod
if runningPod != nil {
p = *runningPod
} else if status != nil {
p = kubecontainer.ConvertPodStatusToRunningPod(kl.GetRuntime().Type(), status)
}
// cache the pod cgroup Name for reducing the cpu resource limits of the pod cgroup once the pod is killed
pcm := kl.containerManager.NewPodContainerManager()
var podCgroup cm.CgroupName
reduceCpuLimts := true
if pod != nil {
podCgroup, _ = pcm.GetPodContainerName(pod)
} else {
// If the pod is nil then cgroup limit must have already
// been decreased earlier
reduceCpuLimts = false
}
// Call the container runtime KillPod method which stops all running containers of the pod
if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil {
return err
}
// At this point the pod might not completely free up cpu and memory resources.
// In such a case deleting the pod's cgroup might cause the pod's charges to be transferred
// to the parent cgroup. There might be various kinds of pod charges at this point.
// For example, any volume used by the pod that was backed by memory will have its
// pages charged to the pod cgroup until those volumes are removed by the kubelet.
// Hence we only reduce the cpu resource limits of the pod's cgroup
	// and defer the responsibility of destroying the pod's cgroup to the
// cleanup method and the housekeeping loop.
if reduceCpuLimts {
pcm.ReduceCPULimits(podCgroup)
}
return nil
}
// makePodDataDirs creates the data directories for the pod.
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
uid := pod.UID
if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
if err := os.MkdirAll(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
return nil
}
// returns whether the pod uses the host network namespace.
func podUsesHostNetwork(pod *v1.Pod) bool {
return pod.Spec.HostNetwork
}
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
// secrets.
// TODO: duplicate secrets are being retrieved multiple times and there
// is no cache. Creating and using a secret manager interface will make this
// easier to address.
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) {
pullSecrets := []v1.Secret{}
for _, secretRef := range pod.Spec.ImagePullSecrets {
secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name)
if err != nil {
glog.Warningf("Unable to retrieve pull secret %s/%s for %s/%s due to %v. The image pull may not succeed.", pod.Namespace, secretRef.Name, pod.Namespace, pod.Name, err)
continue
}
pullSecrets = append(pullSecrets, *secret)
}
return pullSecrets, nil
}
// Returns true if pod is in the terminated state ("Failed" or "Succeeded").
func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool {
var status v1.PodStatus
// Check the cached pod status which was set after the last sync.
status, ok := kl.statusManager.GetPodStatus(pod.UID)
if !ok {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
status = pod.Status
}
if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
return true
}
return false
}
// filterOutTerminatedPods returns the given pods which the status manager
// does not consider failed or succeeded.
func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
var filteredPods []*v1.Pod
for _, p := range pods {
if kl.podIsTerminated(p) {
continue
}
filteredPods = append(filteredPods, p)
}
return filteredPods
}
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
podUIDs := make(map[types.UID]bool)
for _, pod := range pods {
podUIDs[pod.UID] = true
}
for _, pod := range mirrorPods {
podUIDs[pod.UID] = true
}
kl.statusManager.RemoveOrphanedStatuses(podUIDs)
}
// HandlePodCleanups performs a series of cleanup work, including terminating
// pod workers, killing unwanted pods, and removing orphaned volumes/pod
// directories.
// NOTE: This function is executed by the main sync loop, so it
// should not contain any blocking calls.
func (kl *Kubelet) HandlePodCleanups() error {
// The kubelet lacks checkpointing, so we need to introspect the set of pods
// in the cgroup tree prior to inspecting the set of pods in our pod manager.
	// This ensures our view of the cgroup tree does not mistakenly observe pods
// that are added after the fact...
var (
cgroupPods map[types.UID]cm.CgroupName
err error
)
if kl.cgroupsPerQOS {
pcm := kl.containerManager.NewPodContainerManager()
cgroupPods, err = pcm.GetAllPodsFromCgroups()
if err != nil {
return fmt.Errorf("failed to get list of pods that still exist on cgroup mounts: %v", err)
}
}
allPods, mirrorPods := kl.podManager.GetPodsAndMirrorPods()
// Pod phase progresses monotonically. Once a pod has reached a final state,
// it should never leave regardless of the restart policy. The statuses
// of such pods should not be changed, and there is no need to sync them.
// TODO: the logic here does not handle two cases:
// 1. If the containers were removed immediately after they died, kubelet
// may fail to generate correct statuses, let alone filtering correctly.
// 2. If kubelet restarted before writing the terminated status for a pod
// to the apiserver, it could still restart the terminated pod (even
// though the pod was not considered terminated by the apiserver).
// These two conditions could be alleviated by checkpointing kubelet.
activePods := kl.filterOutTerminatedPods(allPods)
desiredPods := make(map[types.UID]empty)
for _, pod := range activePods {
desiredPods[pod.UID] = empty{}
}
// Stop the workers for no-longer existing pods.
// TODO: is here the best place to forget pod workers?
kl.podWorkers.ForgetNonExistingPodWorkers(desiredPods)
kl.probeManager.CleanupPods(activePods)
runningPods, err := kl.runtimeCache.GetPods()
if err != nil {
glog.Errorf("Error listing containers: %#v", err)
return err
}
for _, pod := range runningPods {
if _, found := desiredPods[pod.ID]; !found {
kl.podKillingCh <- &kubecontainer.PodPair{APIPod: nil, RunningPod: pod}
}
}
kl.removeOrphanedPodStatuses(allPods, mirrorPods)
// Note that we just killed the unwanted pods. This may not have reflected
// in the cache. We need to bypass the cache to get the latest set of
// running pods to clean up the volumes.
// TODO: Evaluate the performance impact of bypassing the runtime cache.
runningPods, err = kl.containerRuntime.GetPods(false)
if err != nil {
glog.Errorf("Error listing containers: %#v", err)
return err
}
// Remove any orphaned volumes.
// Note that we pass all pods (including terminated pods) to the function,
// so that we don't remove volumes associated with terminated but not yet
// deleted pods.
err = kl.cleanupOrphanedPodDirs(allPods, runningPods)
if err != nil {
// We want all cleanup tasks to be run even if one of them failed. So
// we just log an error here and continue other cleanup tasks.
// This also applies to the other clean up tasks.
glog.Errorf("Failed cleaning up orphaned pod directories: %v", err)
}
// Remove any orphaned mirror pods.
kl.podManager.DeleteOrphanedMirrorPods()
// Clear out any old bandwidth rules
err = kl.cleanupBandwidthLimits(allPods)
if err != nil {
glog.Errorf("Failed cleaning up bandwidth limits: %v", err)
}
// Remove any cgroups in the hierarchy for pods that should no longer exist
if kl.cgroupsPerQOS {
kl.cleanupOrphanedPodCgroups(cgroupPods, allPods, runningPods)
}
kl.backOff.GC()
return nil
}
// podKiller launches a goroutine to kill a pod received from the channel if
// another goroutine isn't already in action.
func (kl *Kubelet) podKiller() {
killing := sets.NewString()
resultCh := make(chan types.UID)
defer close(resultCh)
for {
select {
case podPair, ok := <-kl.podKillingCh:
if !ok {
return
}
runningPod := podPair.RunningPod
apiPod := podPair.APIPod
if killing.Has(string(runningPod.ID)) {
// The pod is already being killed.
break
}
killing.Insert(string(runningPod.ID))
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
defer func() {
ch <- runningPod.ID
}()
glog.V(2).Infof("Killing unwanted pod %q", runningPod.Name)
err := kl.killPod(apiPod, runningPod, nil, nil)
if err != nil {
glog.Errorf("Failed killing the pod %q: %v", runningPod.Name, err)
}
}(apiPod, runningPod, resultCh)
case podID := <-resultCh:
killing.Delete(string(podID))
}
}
}
// checkHostPortConflicts detects pods with conflicted host ports.
func hasHostPortConflicts(pods []*v1.Pod) bool {
ports := sets.String{}
for _, pod := range pods {
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.InitContainers, &ports, field.NewPath("spec", "initContainers")); len(errs) > 0 {
glog.Errorf("Pod %q: HostPort is already allocated, ignoring: %v", format.Pod(pod), errs)
return true
}
}
return false
}
// validateContainerLogStatus returns the container ID for the desired container to retrieve logs for, based on the state
// of the container. The previous flag will only return the logs for the last terminated container; otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName)
// if not found, check the init containers
if !found {
cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
}
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
}
lastState := cStatus.LastTerminationState
waiting, running, terminated := cStatus.State.Waiting, cStatus.State.Running, cStatus.State.Terminated
switch {
case previous:
if lastState.Terminated == nil {
return kubecontainer.ContainerID{}, fmt.Errorf("previous terminated container %q in pod %q not found", containerName, podName)
}
cID = lastState.Terminated.ContainerID
case running != nil:
cID = cStatus.ContainerID
case terminated != nil:
cID = terminated.ContainerID
case lastState.Terminated != nil:
cID = lastState.Terminated.ContainerID
case waiting != nil:
// output some info for the most common pending failures
switch reason := waiting.Reason; reason {
case images.ErrImagePull.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: image can't be pulled", containerName, podName)
case images.ErrImagePullBackOff.Error():
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: trying and failing to pull image", containerName, podName)
default:
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start: %v", containerName, podName, reason)
}
default:
// unrecognized state
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is waiting to start - no logs yet", containerName, podName)
}
return kubecontainer.ParseContainerID(cID), nil
}
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Pod workers periodically write status to statusManager. If status is not
// cached there, something is wrong (or kubelet just restarted and hasn't
// caught up yet). Just assume the pod is not ready yet.
name, namespace, err := kubecontainer.ParsePodFullName(podFullName)
if err != nil {
return fmt.Errorf("unable to parse pod full name %q: %v", podFullName, err)
}
pod, ok := kl.GetPodByName(namespace, name)
if !ok {
return fmt.Errorf("pod %q cannot be found - no logs available", name)
}
podUID := pod.UID
if mirrorPod, ok := kl.podManager.GetMirrorPodByPod(pod); ok {
podUID = mirrorPod.UID
}
podStatus, found := kl.statusManager.GetPodStatus(podUID)
if !found {
// If there is no cached status, use the status from the
// apiserver. This is useful if kubelet has recently been
// restarted.
podStatus = pod.Status
}
// TODO: Consolidate the logic here with kuberuntime.GetContainerLogs, here we convert container name to containerID,
// but inside kuberuntime we convert container id back to container name and restart count.
// TODO: After separate container log lifecycle management, we should get log based on the existing log files
// instead of container status.
containerID, err := kl.validateContainerLogStatus(pod.Name, &podStatus, containerName, logOptions.Previous)
if err != nil {
return err
}
// Do a zero-byte write to stdout before handing off to the container runtime.
// This ensures at least one Write call is made to the writer when copying starts,
// even if we then block waiting for log output from the container.
if _, err := stdout.Write([]byte{}); err != nil {
return err
}
return kl.containerRuntime.GetContainerLogs(pod, containerID, logOptions, stdout, stderr)
}
// GetPhase returns the phase of a pod given its container info.
// This func is exported to simplify integration with 3rd party kubelet
// integrations like kubernetes-mesos.
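// For example, a pod whose only container is Running is PodRunning, while a pod with
// restartPolicy Never whose containers have all terminated with exit code 0 is PodSucceeded.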
func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
initialized := 0
pendingInitialization := 0
failedInitialization := 0
for _, container := range spec.InitContainers {
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
pendingInitialization++
continue
}
switch {
case containerStatus.State.Running != nil:
pendingInitialization++
case containerStatus.State.Terminated != nil:
if containerStatus.State.Terminated.ExitCode == 0 {
initialized++
} else {
failedInitialization++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
if containerStatus.LastTerminationState.Terminated.ExitCode == 0 {
initialized++
} else {
failedInitialization++
}
} else {
pendingInitialization++
}
default:
pendingInitialization++
}
}
unknown := 0
running := 0
waiting := 0
stopped := 0
failed := 0
succeeded := 0
for _, container := range spec.Containers {
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
unknown++
continue
}
switch {
case containerStatus.State.Running != nil:
running++
case containerStatus.State.Terminated != nil:
stopped++
if containerStatus.State.Terminated.ExitCode == 0 {
succeeded++
} else {
failed++
}
case containerStatus.State.Waiting != nil:
if containerStatus.LastTerminationState.Terminated != nil {
stopped++
} else {
waiting++
}
default:
unknown++
}
}
if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
return v1.PodFailed
}
switch {
case pendingInitialization > 0:
fallthrough
case waiting > 0:
glog.V(5).Infof("pod waiting > 0, pending")
// One or more containers has not been started
return v1.PodPending
case running > 0 && unknown == 0:
// All containers have been started, and at least
// one container is running
return v1.PodRunning
case running == 0 && stopped > 0 && unknown == 0:
// All containers are terminated
if spec.RestartPolicy == v1.RestartPolicyAlways {
// All containers are in the process of restarting
return v1.PodRunning
}
if stopped == succeeded {
// RestartPolicy is not Always, and all
// containers are terminated in success
return v1.PodSucceeded
}
if spec.RestartPolicy == v1.RestartPolicyNever {
// RestartPolicy is Never, and all containers are
// terminated with at least one in failure
return v1.PodFailed
}
// RestartPolicy is OnFailure, and at least one in failure
// and in the process of restarting
return v1.PodRunning
default:
glog.V(5).Infof("pod default case, pending")
return v1.PodPending
}
}
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status.
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
// check if an internal module has requested the pod is evicted.
for _, podSyncHandler := range kl.PodSyncHandlers {
if result := podSyncHandler.ShouldEvict(pod); result.Evict {
return v1.PodStatus{
Phase: v1.PodFailed,
Reason: result.Reason,
Message: result.Message,
}
}
}
s := kl.convertStatusToAPIStatus(pod, podStatus)
// Assume info is ready to process
spec := &pod.Spec
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = GetPhase(spec, allStatus)
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
	// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus()
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
s.Conditions = append(s.Conditions, *oldPodScheduled)
}
v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
})
if !kl.standaloneMode {
hostIP, err := kl.getHostIPAnyWay()
if err != nil {
glog.V(4).Infof("Cannot get host IP: %v", err)
} else {
s.HostIP = hostIP.String()
if podUsesHostNetwork(pod) && s.PodIP == "" {
s.PodIP = hostIP.String()
}
}
}
return *s
}
// convertStatusToAPIStatus creates an api PodStatus for the given pod from
// the given internal pod status. It is purely transformative and does not
// alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
var apiPodStatus v1.PodStatus
apiPodStatus.PodIP = podStatus.IP
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
pod.Status.ContainerStatuses,
pod.Spec.Containers,
len(pod.Spec.InitContainers) > 0,
false,
)
apiPodStatus.InitContainerStatuses = kl.convertToAPIContainerStatuses(
pod, podStatus,
pod.Status.InitContainerStatuses,
pod.Spec.InitContainers,
len(pod.Spec.InitContainers) > 0,
true,
)
return &apiPodStatus
}
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus {
cid := cs.ID.String()
status := &v1.ContainerStatus{
Name: cs.Name,
RestartCount: int32(cs.RestartCount),
Image: cs.Image,
ImageID: cs.ImageID,
ContainerID: cid,
}
switch cs.State {
case kubecontainer.ContainerStateRunning:
status.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.NewTime(cs.StartedAt)}
case kubecontainer.ContainerStateExited:
status.State.Terminated = &v1.ContainerStateTerminated{
ExitCode: int32(cs.ExitCode),
Reason: cs.Reason,
Message: cs.Message,
StartedAt: metav1.NewTime(cs.StartedAt),
FinishedAt: metav1.NewTime(cs.FinishedAt),
ContainerID: cid,
}
default:
status.State.Waiting = &v1.ContainerStateWaiting{}
}
return status
}
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*v1.ContainerStatus, len(containers))
defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"}}
if hasInitContainers {
defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "PodInitializing"}}
}
for _, container := range containers {
status := &v1.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
}
// Apply some values from the old statuses as the default values.
if oldStatus, found := oldStatuses[container.Name]; found {
status.RestartCount = oldStatus.RestartCount
status.LastTerminationState = oldStatus.LastTerminationState
}
statuses[container.Name] = status
}
	// Make the latest container status come first.
sort.Sort(sort.Reverse(kubecontainer.SortContainerStatusesByCreationTime(podStatus.ContainerStatuses)))
// Set container statuses according to the statuses seen in pod status
containerSeen := map[string]int{}
for _, cStatus := range podStatus.ContainerStatuses {
cName := cStatus.Name
if _, ok := statuses[cName]; !ok {
// This would also ignore the infra container.
continue
}
if containerSeen[cName] >= 2 {
continue
}
status := convertContainerStatus(cStatus)
if containerSeen[cName] == 0 {
statuses[cName] = status
} else {
statuses[cName].LastTerminationState = status.State
}
containerSeen[cName] = containerSeen[cName] + 1
}
	// Handle the containers that failed to start, which should be in the Waiting state.
for _, container := range containers {
if isInitContainer {
// If the init container is terminated with exit code 0, it won't be restarted.
// TODO(random-liu): Handle this in a cleaner way.
s := podStatus.FindContainerStatusByName(container.Name)
if s != nil && s.State == kubecontainer.ContainerStateExited && s.ExitCode == 0 {
continue
}
}
// If a container should be restarted in next syncpod, it is *Waiting*.
if !kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
continue
}
status := statuses[container.Name]
reason, message, ok := kl.reasonCache.Get(pod.UID, container.Name)
if !ok {
// In fact, we could also apply Waiting state here, but it is less informative,
// and the container will be restarted soon, so we prefer the original state here.
// Note that with the current implementation of ShouldContainerBeRestarted the original state here
// could be:
// * Waiting: There is no associated historical container and start failure reason record.
// * Terminated: The container is terminated.
continue
}
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
Reason: reason.Error(),
Message: message,
},
}
statuses[container.Name] = status
}
var containerStatuses []v1.ContainerStatus
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
// Sort the container statuses since clients of this interface expect the list
	// of containers in a pod to have a deterministic order.
if isInitContainer {
kubetypes.SortInitContainerStatuses(pod, containerStatuses)
} else {
sort.Sort(kubetypes.SortedContainerStatuses(containerStatuses))
}
return containerStatuses
}
// Returns logs of current machine.
func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
// TODO: whitelist logs we are willing to serve
kl.logServer.ServeHTTP(w, req)
}
// findContainer finds and returns the container with the given pod ID, full name, and container name.
// It returns nil if not found.
func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) {
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return nil, err
}
podUID = kl.podManager.TranslatePodUID(podUID)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
return pod.FindContainerByName(containerName), nil
}
// Run a command in a container, returns the combined stdout, stderr as an array of bytes
func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) {
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
// TODO(timstclair): Pass a proper timeout value.
return kl.runner.RunInContainer(container.ID, cmd, 0)
}
// ExecInContainer executes a command in a container, connecting the supplied
// stdin/stdout/stderr to the command's IO streams.
func (kl *Kubelet) ExecInContainer(podFullName string, podUID types.UID, containerName string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size, timeout time.Duration) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.ExecInContainer(container.ID, cmd, stdin, stdout, stderr, tty, resize, timeout)
}
// AttachContainer uses the container runtime to attach the given streams to
// the given container.
func (kl *Kubelet) AttachContainer(podFullName string, podUID types.UID, containerName string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return err
}
if container == nil {
return fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.AttachContainer(container.ID, stdin, stdout, stderr, tty, resize)
}
// PortForward connects to the pod's port and copies data between the port
// and the stream.
func (kl *Kubelet) PortForward(podFullName string, podUID types.UID, port uint16, stream io.ReadWriteCloser) error {
streamingRuntime, ok := kl.containerRuntime.(kubecontainer.DirectStreamingRuntime)
if !ok {
return fmt.Errorf("streaming methods not supported by runtime")
}
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return err
}
podUID = kl.podManager.TranslatePodUID(podUID)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return fmt.Errorf("pod not found (%q)", podFullName)
}
return streamingRuntime.PortForward(&pod, port, stream)
}
// GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommand.Options) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the exec directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container not found (%q)", containerName)
}
return streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY)
default:
return nil, fmt.Errorf("container runtime does not support exec")
}
}
// GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommand.Options) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
// Kubelet will serve the attach directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
container, err := kl.findContainer(podFullName, podUID, containerName)
if err != nil {
return nil, err
}
if container == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
// The TTY setting for attach must match the TTY setting in the initial container configuration,
// since whether the process is running in a TTY cannot be changed after it has started. We
// need the api.Pod to get the TTY status.
pod, found := kl.GetPodByFullName(podFullName)
if !found || (string(podUID) != "" && pod.UID != podUID) {
return nil, fmt.Errorf("pod %s not found", podFullName)
}
containerSpec := kubecontainer.GetContainerSpec(pod, containerName)
if containerSpec == nil {
return nil, fmt.Errorf("container %s not found in pod %s", containerName, podFullName)
}
tty := containerSpec.TTY
return streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty)
default:
return nil, fmt.Errorf("container runtime does not support attach")
}
}
// GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it.
func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID) (*url.URL, error) {
switch streamingRuntime := kl.containerRuntime.(type) {
case kubecontainer.DirectStreamingRuntime:
		// Kubelet will serve the port-forward directly.
return nil, nil
case kubecontainer.IndirectStreamingRuntime:
pods, err := kl.containerRuntime.GetPods(false)
if err != nil {
return nil, err
}
podUID = kl.podManager.TranslatePodUID(podUID)
podFullName := kubecontainer.BuildPodFullName(podName, podNamespace)
pod := kubecontainer.Pods(pods).FindPod(podFullName, podUID)
if pod.IsEmpty() {
return nil, fmt.Errorf("pod not found (%q)", podFullName)
}
return streamingRuntime.GetPortForward(podName, podNamespace, podUID)
default:
return nil, fmt.Errorf("container runtime does not support port-forward")
}
}
// cleanupOrphanedPodCgroups removes the Cgroups of pods that should not be
// running and whose volumes have been cleaned up.
func (kl *Kubelet) cleanupOrphanedPodCgroups(
cgroupPods map[types.UID]cm.CgroupName,
pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
// Add all running and existing terminated pods to a set allPods
allPods := sets.NewString()
for _, pod := range pods {
allPods.Insert(string(pod.UID))
}
for _, pod := range runningPods {
allPods.Insert(string(pod.ID))
}
pcm := kl.containerManager.NewPodContainerManager()
// Iterate over all the found pods to verify if they should be running
for uid, val := range cgroupPods {
if allPods.Has(string(uid)) {
continue
}
		// If volumes have not been unmounted/detached, do not delete the cgroup, so that the charge does not get transferred to the parent.
if podVolumesExist := kl.podVolumesExist(uid); podVolumesExist {
glog.V(3).Infof("Orphaned pod %q found, but volumes are not cleaned up, Skipping cgroups deletion: %v", uid)
continue
}
glog.V(3).Infof("Orphaned pod %q found, removing pod cgroups", uid)
// Destroy all cgroups of pod that should not be running,
// by first killing all the attached processes to these cgroups.
// We ignore errors thrown by the method, as the housekeeping loop would
// again try to delete these unwanted pod cgroups
go pcm.Destroy(val)
}
return nil
}
// enableHostUserNamespace determines if the host user namespace should be used by the container runtime.
// Returns true if the pod is using a host PID, IPC, or network namespace, the pod is using a non-namespaced
// capability, the pod contains a privileged container, or the pod has a host path volume.
//
// NOTE: if a container shares any namespace with another container, it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
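// For example, a pod that sets hostNetwork/hostPID/hostIPC, mounts a hostPath volume,
// adds the MKNOD capability, or runs a privileged container will use the host user namespace.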
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
if hasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) {
return true
}
return false
}
// hasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func hasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
*c.SecurityContext.Privileged {
return true
}
}
return false
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
func hasNonNamespacedCapability(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
for _, cap := range c.SecurityContext.Capabilities.Add {
if cap == "MKNOD" || cap == "SYS_TIME" || cap == "SYS_MODULE" {
return true
}
}
}
}
return false
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *v1.Pod) bool {
for _, v := range pod.Spec.Volumes {
if v.HostPath != nil {
return true
}
}
return false
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
func hasHostNamespace(pod *v1.Pod) bool {
if pod.Spec.SecurityContext == nil {
return false
}
return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID
}
// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)
if err != nil {
glog.Warningf("unable to retrieve pvc %s:%s - %v", pod.Namespace, volume.PersistentVolumeClaim.ClaimName, err)
continue
}
if pvc != nil {
referencedVolume, err := kl.kubeClient.Core().PersistentVolumes().Get(pvc.Spec.VolumeName)
if err != nil {
glog.Warningf("unable to retrieve pvc %s - %v", pvc.Spec.VolumeName, err)
continue
}
if referencedVolume != nil && referencedVolume.Spec.HostPath != nil {
return true
}
}
}
}
return false
}
| pkg/kubelet/kubelet_pods.go | 1 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.9981256127357483,
0.007804118562489748,
0.00016009164392016828,
0.00022861361503601074,
0.08065246790647507
] |
{
"id": 3,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\tglog.V(4).Infof(\"Cannot get host IP: %v\", err)\n",
"\t\t} else {\n",
"\t\t\ts.HostIP = hostIP.String()\n",
"\t\t\tif podUsesHostNetwork(pod) && s.PodIP == \"\" {\n",
"\t\t\t\ts.PodIP = hostIP.String()\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif kubecontainer.IsHostNetworkPod(pod) && s.PodIP == \"\" {\n"
],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 1068
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/flag"
)
type createAuthInfoOptions struct {
configAccess clientcmd.ConfigAccess
name string
authPath flag.StringFlag
clientCertificate flag.StringFlag
clientKey flag.StringFlag
token flag.StringFlag
username flag.StringFlag
password flag.StringFlag
embedCertData flag.Tristate
authProvider flag.StringFlag
authProviderArgs map[string]string
authProviderArgsToRemove []string
}
const (
flagAuthProvider = "auth-provider"
flagAuthProviderArg = "auth-provider-arg"
)
var (
create_authinfo_long = fmt.Sprintf(templates.LongDesc(`
Sets a user entry in kubeconfig
Specifying a name that already exists will merge new fields on top of existing values.
Client-certificate flags:
--%v=certfile --%v=keyfile
Bearer token flags:
--%v=bearer_token
Basic auth flags:
--%v=basic_user --%v=basic_password
Bearer token and basic auth are mutually exclusive.`), clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword)
create_authinfo_example = templates.Examples(`
# Set only the "client-key" field on the "cluster-admin"
# entry, without touching other values:
kubectl config set-credentials cluster-admin --client-key=~/.kube/admin.key
# Set basic auth for the "cluster-admin" entry
kubectl config set-credentials cluster-admin --username=admin --password=uXFGweU9l35qcif
# Embed client certificate data in the "cluster-admin" entry
kubectl config set-credentials cluster-admin --client-certificate=~/.kube/admin.crt --embed-certs=true
# Enable the Google Compute Platform auth provider for the "cluster-admin" entry
kubectl config set-credentials cluster-admin --auth-provider=gcp
# Enable the OpenID Connect auth provider for the "cluster-admin" entry with additional args
kubectl config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-id=foo --auth-provider-arg=client-secret=bar
# Remove the "client-secret" config value for the OpenID Connect auth provider for the "cluster-admin" entry
kubectl config set-credentials cluster-admin --auth-provider=oidc --auth-provider-arg=client-secret-`)
)
func NewCmdConfigSetAuthInfo(out io.Writer, configAccess clientcmd.ConfigAccess) *cobra.Command {
options := &createAuthInfoOptions{configAccess: configAccess}
return newCmdConfigSetAuthInfo(out, options)
}
func newCmdConfigSetAuthInfo(out io.Writer, options *createAuthInfoOptions) *cobra.Command {
cmd := &cobra.Command{
Use: fmt.Sprintf("set-credentials NAME [--%v=path/to/certfile] [--%v=path/to/keyfile] [--%v=bearer_token] [--%v=basic_user] [--%v=basic_password] [--%v=provider_name] [--%v=key=value]", clientcmd.FlagCertFile, clientcmd.FlagKeyFile, clientcmd.FlagBearerToken, clientcmd.FlagUsername, clientcmd.FlagPassword, flagAuthProvider, flagAuthProviderArg),
Short: "Sets a user entry in kubeconfig",
Long: create_authinfo_long,
Example: create_authinfo_example,
Run: func(cmd *cobra.Command, args []string) {
if !options.complete(cmd, out) {
cmd.Help()
return
}
cmdutil.CheckErr(options.run())
fmt.Fprintf(out, "User %q set.\n", options.name)
},
}
cmd.Flags().Var(&options.clientCertificate, clientcmd.FlagCertFile, "path to "+clientcmd.FlagCertFile+" file for the user entry in kubeconfig")
cmd.MarkFlagFilename(clientcmd.FlagCertFile)
cmd.Flags().Var(&options.clientKey, clientcmd.FlagKeyFile, "path to "+clientcmd.FlagKeyFile+" file for the user entry in kubeconfig")
cmd.MarkFlagFilename(clientcmd.FlagKeyFile)
cmd.Flags().Var(&options.token, clientcmd.FlagBearerToken, clientcmd.FlagBearerToken+" for the user entry in kubeconfig")
cmd.Flags().Var(&options.username, clientcmd.FlagUsername, clientcmd.FlagUsername+" for the user entry in kubeconfig")
cmd.Flags().Var(&options.password, clientcmd.FlagPassword, clientcmd.FlagPassword+" for the user entry in kubeconfig")
cmd.Flags().Var(&options.authProvider, flagAuthProvider, "auth provider for the user entry in kubeconfig")
cmd.Flags().StringSlice(flagAuthProviderArg, nil, "'key=value' arguments for the auth provider")
f := cmd.Flags().VarPF(&options.embedCertData, clientcmd.FlagEmbedCerts, "", "embed client cert/key for the user entry in kubeconfig")
f.NoOptDefVal = "true"
return cmd
}
func (o createAuthInfoOptions) run() error {
err := o.validate()
if err != nil {
return err
}
config, err := o.configAccess.GetStartingConfig()
if err != nil {
return err
}
startingStanza, exists := config.AuthInfos[o.name]
if !exists {
startingStanza = clientcmdapi.NewAuthInfo()
}
authInfo := o.modifyAuthInfo(*startingStanza)
config.AuthInfos[o.name] = &authInfo
if err := clientcmd.ModifyConfig(o.configAccess, *config, true); err != nil {
return err
}
return nil
}
// modifyAuthInfo returns an AuthInfo built by applying the options on top of the existing auth info
func (o *createAuthInfoOptions) modifyAuthInfo(existingAuthInfo clientcmdapi.AuthInfo) clientcmdapi.AuthInfo {
modifiedAuthInfo := existingAuthInfo
var setToken, setBasic bool
if o.clientCertificate.Provided() {
certPath := o.clientCertificate.Value()
if o.embedCertData.Value() {
modifiedAuthInfo.ClientCertificateData, _ = ioutil.ReadFile(certPath)
modifiedAuthInfo.ClientCertificate = ""
} else {
certPath, _ = filepath.Abs(certPath)
modifiedAuthInfo.ClientCertificate = certPath
if len(modifiedAuthInfo.ClientCertificate) > 0 {
modifiedAuthInfo.ClientCertificateData = nil
}
}
}
if o.clientKey.Provided() {
keyPath := o.clientKey.Value()
if o.embedCertData.Value() {
modifiedAuthInfo.ClientKeyData, _ = ioutil.ReadFile(keyPath)
modifiedAuthInfo.ClientKey = ""
} else {
keyPath, _ = filepath.Abs(keyPath)
modifiedAuthInfo.ClientKey = keyPath
if len(modifiedAuthInfo.ClientKey) > 0 {
modifiedAuthInfo.ClientKeyData = nil
}
}
}
if o.token.Provided() {
modifiedAuthInfo.Token = o.token.Value()
setToken = len(modifiedAuthInfo.Token) > 0
}
if o.username.Provided() {
modifiedAuthInfo.Username = o.username.Value()
setBasic = setBasic || len(modifiedAuthInfo.Username) > 0
}
if o.password.Provided() {
modifiedAuthInfo.Password = o.password.Value()
setBasic = setBasic || len(modifiedAuthInfo.Password) > 0
}
if o.authProvider.Provided() {
newName := o.authProvider.Value()
// Only overwrite if the existing auth-provider is nil, or different than the newly specified one.
if modifiedAuthInfo.AuthProvider == nil || modifiedAuthInfo.AuthProvider.Name != newName {
modifiedAuthInfo.AuthProvider = &clientcmdapi.AuthProviderConfig{
Name: newName,
}
}
}
if modifiedAuthInfo.AuthProvider != nil {
if modifiedAuthInfo.AuthProvider.Config == nil {
modifiedAuthInfo.AuthProvider.Config = make(map[string]string)
}
for _, toRemove := range o.authProviderArgsToRemove {
delete(modifiedAuthInfo.AuthProvider.Config, toRemove)
}
for key, value := range o.authProviderArgs {
modifiedAuthInfo.AuthProvider.Config[key] = value
}
}
// If any auth info was set, make sure any other existing auth types are cleared
if setToken || setBasic {
if !setToken {
modifiedAuthInfo.Token = ""
}
if !setBasic {
modifiedAuthInfo.Username = ""
modifiedAuthInfo.Password = ""
}
}
return modifiedAuthInfo
}
func (o *createAuthInfoOptions) complete(cmd *cobra.Command, out io.Writer) bool {
args := cmd.Flags().Args()
if len(args) != 1 {
return false
}
authProviderArgs, err := cmd.Flags().GetStringSlice(flagAuthProviderArg)
if err != nil {
fmt.Fprintf(out, "Error: %s\n", err)
return false
}
if len(authProviderArgs) > 0 {
newPairs, removePairs, err := cmdutil.ParsePairs(authProviderArgs, flagAuthProviderArg, true)
if err != nil {
fmt.Fprintf(out, "Error: %s\n", err)
return false
}
o.authProviderArgs = newPairs
o.authProviderArgsToRemove = removePairs
}
o.name = args[0]
return true
}
func (o createAuthInfoOptions) validate() error {
if len(o.name) == 0 {
return errors.New("you must specify a non-empty user name")
}
methods := []string{}
if len(o.token.Value()) > 0 {
methods = append(methods, fmt.Sprintf("--%v", clientcmd.FlagBearerToken))
}
if len(o.username.Value()) > 0 || len(o.password.Value()) > 0 {
methods = append(methods, fmt.Sprintf("--%v/--%v", clientcmd.FlagUsername, clientcmd.FlagPassword))
}
if len(methods) > 1 {
return fmt.Errorf("you cannot specify more than one authentication method at the same time: %v", strings.Join(methods, ", "))
}
if o.embedCertData.Value() {
certPath := o.clientCertificate.Value()
keyPath := o.clientKey.Value()
if certPath == "" && keyPath == "" {
return fmt.Errorf("you must specify a --%s or --%s to embed", clientcmd.FlagCertFile, clientcmd.FlagKeyFile)
}
if certPath != "" {
if _, err := ioutil.ReadFile(certPath); err != nil {
return fmt.Errorf("error reading %s data from %s: %v", clientcmd.FlagCertFile, certPath, err)
}
}
if keyPath != "" {
if _, err := ioutil.ReadFile(keyPath); err != nil {
return fmt.Errorf("error reading %s data from %s: %v", clientcmd.FlagKeyFile, keyPath, err)
}
}
}
return nil
}
| pkg/kubectl/cmd/config/create_authinfo.go | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.00022786053887102753,
0.00017476920038461685,
0.00016156335186678916,
0.000174198838067241,
0.000010994046533596702
] |
{
"id": 3,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\tglog.V(4).Infof(\"Cannot get host IP: %v\", err)\n",
"\t\t} else {\n",
"\t\t\ts.HostIP = hostIP.String()\n",
"\t\t\tif podUsesHostNetwork(pod) && s.PodIP == \"\" {\n",
"\t\t\t\ts.PodIP = hostIP.String()\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif kubecontainer.IsHostNetworkPod(pod) && s.PodIP == \"\" {\n"
],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 1068
} | apiVersion: v1
kind: ReplicationController
metadata:
name: update-demo-kitten
spec:
selector:
name: update-demo
version: kitten
template:
metadata:
labels:
name: update-demo
version: kitten
spec:
containers:
- image: gcr.io/google_containers/update-demo:kitten
name: update-demo
ports:
- containerPort: 80
protocol: TCP
| test/fixtures/doc-yaml/user-guide/update-demo/kitten-rc.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.0001800877507776022,
0.0001789791276678443,
0.00017822592053562403,
0.0001786236825864762,
8.005638392205583e-7
] |
{
"id": 3,
"code_window": [
"\t\tif err != nil {\n",
"\t\t\tglog.V(4).Infof(\"Cannot get host IP: %v\", err)\n",
"\t\t} else {\n",
"\t\t\ts.HostIP = hostIP.String()\n",
"\t\t\tif podUsesHostNetwork(pod) && s.PodIP == \"\" {\n",
"\t\t\t\ts.PodIP = hostIP.String()\n",
"\t\t\t}\n",
"\t\t}\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\tif kubecontainer.IsHostNetworkPod(pod) && s.PodIP == \"\" {\n"
],
"file_path": "pkg/kubelet/kubelet_pods.go",
"type": "replace",
"edit_start_line_idx": 1068
} | # Contributing to SpdyStream
Want to hack on spdystream? Awesome! Here are instructions to get you
started.
SpdyStream is a part of the [Docker](https://docker.io) project, and follows
the same rules and principles. If you're already familiar with the way
Docker does things, you'll feel right at home.
Otherwise, go read
[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
Happy hacking!
| vendor/github.com/docker/spdystream/CONTRIBUTING.md | 0 | https://github.com/kubernetes/kubernetes/commit/15fc470343dfb6d8593f2548fe24ccec5ca27761 | [
0.00017466371355112642,
0.0001727342896629125,
0.00017080485122278333,
0.0001727342896629125,
0.0000019294311641715467
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// handleMsg is invoked whenever an inbound message is received from a remote\n",
"// peer. The remote connection is torn down upon returning any error.\n",
"func (pm *ProtocolManager) handleMsg(p *peer) error {\n",
"\t// Read the next message from the remote peer, and ensure it's fully consumed\n",
"\tmsg, err := p.rw.ReadMsg()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tselect {\n",
"\tcase err := <-p.errCh:\n",
"\t\treturn err\n",
"\tdefault:\n",
"\t}\n"
],
"file_path": "les/handler.go",
"type": "add",
"edit_start_line_idx": 331
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
)
var (
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
errInvalidHelpTrieReq = errors.New("invalid help trie request")
)
const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
// capacity limitation for parameter updates
const (
allowedUpdateBytes = 100000 // initial/maximum allowed update size
allowedUpdateRate = time.Millisecond * 10 // time constant for recharging one byte of allowance
)
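// Illustrative arithmetic (not from the source): with a recharge rate of one byte
// per 10ms, a second of idle time restores roughly 100 bytes of allowance, and the
// outstanding allowance is capped at allowedUpdateBytes (100000 bytes); see
// rejectUpdate below for the actual bookkeeping.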
// if the total encoded size of a sent transaction batch is over txSizeCostLimit
// per transaction then the request cost is calculated as proportional to the
// encoded size instead of the transaction count
const txSizeCostLimit = 0x10000
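// Worked example (illustrative numbers, not from the source): for a hypothetical
// cost entry with baseCost=1000 and reqCost=100, a batch of 2 transactions whose
// total encoded size is 0x40000 bytes is charged
//   max(1000+100*2, 1000+100*(0x40000/0x10000)) = 1400
// buffer units, i.e. the size-proportional term takes over once the batch exceeds
// txSizeCostLimit bytes per transaction on average; see GetTxRelayCost below.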
const (
announceTypeNone = iota
announceTypeSimple
announceTypeSigned
)
type peer struct {
*p2p.Peer
rw p2p.MsgReadWriter
version int // Protocol version negotiated
network uint64 // Network ID being on
announceType uint64
id string
headInfo *announceData
lock sync.RWMutex
sendQueue *execQueue
errCh chan error
// responseLock ensures that responses are queued in the same order as
// RequestProcessed is called
responseLock sync.Mutex
responseCount uint64
poolEntry *poolEntry
hasBlock func(common.Hash, uint64, bool) bool
responseErrors int
updateCounter uint64
updateTime mclock.AbsTime
fcClient *flowcontrol.ClientNode // nil if the peer is server only
fcServer *flowcontrol.ServerNode // nil if the peer is client only
fcParams flowcontrol.ServerParams
fcCosts requestCostTable
isTrusted bool
isOnlyAnnounce bool
}
func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
return &peer{
Peer: p,
rw: rw,
version: version,
network: network,
id: fmt.Sprintf("%x", id),
isTrusted: isTrusted,
}
}
// rejectUpdate returns true if a parameter update has to be rejected because
// the size and/or rate of updates exceed the capacity limitation
func (p *peer) rejectUpdate(size uint64) bool {
now := mclock.Now()
if p.updateCounter == 0 {
p.updateTime = now
} else {
dt := now - p.updateTime
r := uint64(dt / mclock.AbsTime(allowedUpdateRate))
if p.updateCounter > r {
p.updateCounter -= r
p.updateTime += mclock.AbsTime(allowedUpdateRate * time.Duration(r))
} else {
p.updateCounter = 0
p.updateTime = now
}
}
p.updateCounter += size
return p.updateCounter > allowedUpdateBytes
}
func (p *peer) canQueue() bool {
return p.sendQueue.canQueue()
}
func (p *peer) queueSend(f func()) {
p.sendQueue.queue(f)
}
// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *eth.PeerInfo {
return ð.PeerInfo{
Version: p.version,
Difficulty: p.Td(),
Head: fmt.Sprintf("%x", p.Head()),
}
}
// Head retrieves a copy of the current head (most recent) hash of the peer.
func (p *peer) Head() (hash common.Hash) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:])
return hash
}
func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:])
return hash, p.headInfo.Td
}
func (p *peer) headBlockInfo() blockInfo {
p.lock.RLock()
defer p.lock.RUnlock()
return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}
}
// Td retrieves the current total difficulty of a peer.
func (p *peer) Td() *big.Int {
p.lock.RLock()
defer p.lock.RUnlock()
return new(big.Int).Set(p.headInfo.Td)
}
// waitBefore implements distPeer interface
func (p *peer) waitBefore(maxCost uint64) (time.Duration, float64) {
return p.fcServer.CanSend(maxCost)
}
// updateCapacity updates the request serving capacity assigned to a given client
// and also sends an announcement about the updated flow control parameters
func (p *peer) updateCapacity(cap uint64) {
p.responseLock.Lock()
defer p.responseLock.Unlock()
p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
p.fcClient.UpdateParams(p.fcParams)
var kvList keyValueList
kvList = kvList.add("flowControl/MRR", cap)
kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
p.queueSend(func() { p.SendAnnounce(announceData{Update: kvList}) })
}
func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error {
type req struct {
ReqID uint64
Data interface{}
}
return p2p.Send(w, msgcode, req{reqID, data})
}
// reply struct represents a reply with the actual data already RLP encoded and
// only the bv (buffer value) missing. This allows the serving mechanism to
// calculate the bv value which depends on the data size before sending the reply.
type reply struct {
w p2p.MsgWriter
msgcode, reqID uint64
data rlp.RawValue
}
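// Typical serving flow (sketch, mirroring the handler code): build the reply, e.g.
// r := p.ReplyBlockHeaders(reqID, headers), let flow control account for the work
// via bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost),
// and finally queue r.send(bv) on the peer's send queue.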
// send sends the reply with the calculated buffer value
func (r *reply) send(bv uint64) error {
type resp struct {
ReqID, BV uint64
Data rlp.RawValue
}
return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
}
// size returns the RLP encoded size of the message data
func (r *reply) size() uint32 {
return uint32(len(r.data))
}
func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit
}
return cost
}
func (p *peer) GetTxRelayCost(amount, size int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
var msgcode uint64
switch p.version {
case lpv1:
msgcode = SendTxMsg
case lpv2:
msgcode = SendTxV2Msg
default:
panic(nil)
}
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
if sizeCost > cost {
cost = sizeCost
}
if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit
}
return cost
}
// HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
p.lock.RLock()
hasBlock := p.hasBlock
p.lock.RUnlock()
return hasBlock != nil && hasBlock(hash, number, hasState)
}
// SendAnnounce announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendAnnounce(request announceData) error {
return p2p.Send(p.rw, AnnounceMsg, request)
}
// ReplyBlockHeaders creates a reply with a batch of block headers
func (p *peer) ReplyBlockHeaders(reqID uint64, headers []*types.Header) *reply {
data, _ := rlp.EncodeToBytes(headers)
return &reply{p.rw, BlockHeadersMsg, reqID, data}
}
// ReplyBlockBodiesRLP creates a reply with a batch of block contents from
// an already RLP encoded format.
func (p *peer) ReplyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(bodies)
return &reply{p.rw, BlockBodiesMsg, reqID, data}
}
// ReplyCode creates a reply with a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) ReplyCode(reqID uint64, codes [][]byte) *reply {
data, _ := rlp.EncodeToBytes(codes)
return &reply{p.rw, CodeMsg, reqID, data}
}
// ReplyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(receipts)
return &reply{p.rw, ReceiptsMsg, reqID, data}
}
// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV1Msg, reqID, data}
}
// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data}
}
// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, HeaderProofsMsg, reqID, data}
}
// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
data, _ := rlp.EncodeToBytes(resp)
return &reply{p.rw, HelperTrieProofsMsg, reqID, data}
}
// ReplyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested.
func (p *peer) ReplyTxStatus(reqID uint64, stats []txStatus) *reply {
data, _ := rlp.EncodeToBytes(stats)
return &reply{p.rw, TxStatusMsg, reqID, data}
}
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}
// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []CodeReq) error {
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}
// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
switch p.version {
case lpv1:
return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
case lpv2:
return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
default:
panic(nil)
}
}
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
switch p.version {
case lpv1:
reqs, ok := data.([]ChtReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
case lpv2:
reqs, ok := data.([]HelperTrieReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
default:
panic(nil)
}
}
// RequestTxStatus fetches a batch of transaction status records from a remote node.
func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error {
p.Log().Debug("Requesting transaction status", "count", len(txHashes))
return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes)
}
// SendTxs sends a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "size", len(txs))
switch p.version {
case lpv1:
return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
case lpv2:
return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
default:
panic(nil)
}
}
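// keyValueEntry, keyValueList and keyValueMap carry the loosely typed handshake
// payload: the sender appends RLP-encoded values with add(), the receiver turns
// the received list into a map with decode() and extracts individual fields with
// get(), as the Handshake method below demonstrates.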
type keyValueEntry struct {
Key string
Value rlp.RawValue
}
type keyValueList []keyValueEntry
type keyValueMap map[string]rlp.RawValue
func (l keyValueList) add(key string, val interface{}) keyValueList {
var entry keyValueEntry
entry.Key = key
if val == nil {
val = uint64(0)
}
enc, err := rlp.EncodeToBytes(val)
if err == nil {
entry.Value = enc
}
return append(l, entry)
}
func (l keyValueList) decode() (keyValueMap, uint64) {
m := make(keyValueMap)
var size uint64
for _, entry := range l {
m[entry.Key] = entry.Value
size += uint64(len(entry.Key)) + uint64(len(entry.Value)) + 8
}
return m, size
}
func (m keyValueMap) get(key string, val interface{}) error {
enc, ok := m[key]
if !ok {
return errResp(ErrMissingKey, "%s", key)
}
if val == nil {
return nil
}
return rlp.DecodeBytes(enc, val)
}
func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) {
// Send out own handshake in a new thread
errc := make(chan error, 1)
go func() {
errc <- p2p.Send(p.rw, StatusMsg, sendList)
}()
// In the mean time retrieve the remote status message
msg, err := p.rw.ReadMsg()
if err != nil {
return nil, err
}
if msg.Code != StatusMsg {
return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
if msg.Size > ProtocolMaxMsgSize {
return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
// Decode the handshake
var recvList keyValueList
if err := msg.Decode(&recvList); err != nil {
return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
}
if err := <-errc; err != nil {
return nil, err
}
return recvList, nil
}
// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
p.lock.Lock()
defer p.lock.Unlock()
var send keyValueList
send = send.add("protocolVersion", uint64(p.version))
send = send.add("networkId", p.network)
send = send.add("headTd", td)
send = send.add("headHash", head)
send = send.add("headNum", headNum)
send = send.add("genesisHash", genesis)
if server != nil {
if !server.onlyAnnounce {
// an announce-only server omits these fields; a full server advertises header/chain/state serving and tx relay
send = send.add("serveHeaders", nil)
send = send.add("serveChainSince", uint64(0))
send = send.add("serveStateSince", uint64(0))
send = send.add("txRelay", nil)
}
send = send.add("flowControl/BL", server.defParams.BufLimit)
send = send.add("flowControl/MRR", server.defParams.MinRecharge)
var costList RequestCostList
if server.costTracker != nil {
costList = server.costTracker.makeCostList()
} else {
costList = testCostList()
}
send = send.add("flowControl/MRC", costList)
p.fcCosts = costList.decode()
p.fcParams = server.defParams
} else {
//on client node
p.announceType = announceTypeSimple
if p.isTrusted {
p.announceType = announceTypeSigned
}
send = send.add("announceType", p.announceType)
}
recvList, err := p.sendReceiveHandshake(send)
if err != nil {
return err
}
recv, size := recvList.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
var rGenesis, rHash common.Hash
var rVersion, rNetwork, rNum uint64
var rTd *big.Int
if err := recv.get("protocolVersion", &rVersion); err != nil {
return err
}
if err := recv.get("networkId", &rNetwork); err != nil {
return err
}
if err := recv.get("headTd", &rTd); err != nil {
return err
}
if err := recv.get("headHash", &rHash); err != nil {
return err
}
if err := recv.get("headNum", &rNum); err != nil {
return err
}
if err := recv.get("genesisHash", &rGenesis); err != nil {
return err
}
if rGenesis != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis[:8], genesis[:8])
}
if rNetwork != p.network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
}
if int(rVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
}
if server != nil {
// until we have a proper peer connectivity API, allow LES connection to other servers
/*if recv.get("serveStateSince", nil) == nil {
return errResp(ErrUselessPeer, "wanted client, got server")
}*/
if recv.get("announceType", &p.announceType) != nil {
//set default announceType on server side
p.announceType = announceTypeSimple
}
p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
} else {
//mark OnlyAnnounce server if "serveHeaders", "serveChainSince", "serveStateSince" or "txRelay" fields don't exist
if recv.get("serveChainSince", nil) != nil {
p.isOnlyAnnounce = true
}
if recv.get("serveStateSince", nil) != nil {
p.isOnlyAnnounce = true
}
if recv.get("txRelay", nil) != nil {
p.isOnlyAnnounce = true
}
if p.isOnlyAnnounce && !p.isTrusted {
return errResp(ErrUselessPeer, "peer cannot serve requests")
}
var params flowcontrol.ServerParams
if err := recv.get("flowControl/BL", ¶ms.BufLimit); err != nil {
return err
}
if err := recv.get("flowControl/MRR", ¶ms.MinRecharge); err != nil {
return err
}
var MRC RequestCostList
if err := recv.get("flowControl/MRC", &MRC); err != nil {
return err
}
p.fcParams = params
p.fcServer = flowcontrol.NewServerNode(params, &mclock.System{})
p.fcCosts = MRC.decode()
}
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
return nil
}
// updateFlowControl updates the flow control parameters belonging to the server
// node if the announced key/value set contains relevant fields
func (p *peer) updateFlowControl(update keyValueMap) {
if p.fcServer == nil {
return
}
params := p.fcParams
updateParams := false
if update.get("flowControl/BL", ¶ms.BufLimit) == nil {
updateParams = true
}
if update.get("flowControl/MRR", ¶ms.MinRecharge) == nil {
updateParams = true
}
if updateParams {
p.fcParams = params
p.fcServer.UpdateParams(params)
}
var MRC RequestCostList
if update.get("flowControl/MRC", &MRC) == nil {
p.fcCosts = MRC.decode()
}
}
// String implements fmt.Stringer.
func (p *peer) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("les/%d", p.version),
)
}
// peerSetNotify is a callback interface to notify services about added or
// removed peers
type peerSetNotify interface {
registerPeer(*peer)
unregisterPeer(*peer)
}
// peerSet represents the collection of active peers currently participating in
// the Light Ethereum sub-protocol.
type peerSet struct {
peers map[string]*peer
lock sync.RWMutex
notifyList []peerSetNotify
closed bool
}
// newPeerSet creates a new peer set to track the active participants.
func newPeerSet() *peerSet {
return &peerSet{
peers: make(map[string]*peer),
}
}
// notify adds a service to be notified about added or removed peers
func (ps *peerSet) notify(n peerSetNotify) {
ps.lock.Lock()
ps.notifyList = append(ps.notifyList, n)
peers := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
peers = append(peers, p)
}
ps.lock.Unlock()
for _, p := range peers {
n.registerPeer(p)
}
}
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
if ps.closed {
ps.lock.Unlock()
return errClosed
}
if _, ok := ps.peers[p.id]; ok {
ps.lock.Unlock()
return errAlreadyRegistered
}
ps.peers[p.id] = p
p.sendQueue = newExecQueue(100)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.registerPeer(p)
}
return nil
}
// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity. It also initiates disconnection at the networking layer.
func (ps *peerSet) Unregister(id string) error {
ps.lock.Lock()
if p, ok := ps.peers[id]; !ok {
ps.lock.Unlock()
return errNotRegistered
} else {
delete(ps.peers, id)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.unregisterPeer(p)
}
p.sendQueue.quit()
p.Peer.Disconnect(p2p.DiscUselessPeer)
return nil
}
}
// AllPeerIDs returns a list of all registered peer IDs
func (ps *peerSet) AllPeerIDs() []string {
ps.lock.RLock()
defer ps.lock.RUnlock()
res := make([]string, len(ps.peers))
idx := 0
for id := range ps.peers {
res[idx] = id
idx++
}
return res
}
// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
return ps.peers[id]
}
// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
ps.lock.RLock()
defer ps.lock.RUnlock()
return len(ps.peers)
}
// BestPeer retrieves the known peer with the currently highest total difficulty.
func (ps *peerSet) BestPeer() *peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
var (
bestPeer *peer
bestTd *big.Int
)
for _, p := range ps.peers {
if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
bestPeer, bestTd = p, td
}
}
return bestPeer
}
// AllPeers returns all peers in a list
func (ps *peerSet) AllPeers() []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, len(ps.peers))
i := 0
for _, peer := range ps.peers {
list[i] = peer
i++
}
return list
}
// Close disconnects all peers.
// No new peers can be registered after Close has returned.
func (ps *peerSet) Close() {
ps.lock.Lock()
defer ps.lock.Unlock()
for _, p := range ps.peers {
p.Disconnect(p2p.DiscQuitting)
}
ps.closed = true
}
| les/peer.go | 1 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.8852399587631226,
0.015882428735494614,
0.00016744456661399454,
0.0011699660681188107,
0.09728758037090302
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// handleMsg is invoked whenever an inbound message is received from a remote\n",
"// peer. The remote connection is torn down upon returning any error.\n",
"func (pm *ProtocolManager) handleMsg(p *peer) error {\n",
"\t// Read the next message from the remote peer, and ensure it's fully consumed\n",
"\tmsg, err := p.rw.ReadMsg()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tselect {\n",
"\tcase err := <-p.errCh:\n",
"\t\treturn err\n",
"\tdefault:\n",
"\t}\n"
],
"file_path": "les/handler.go",
"type": "add",
"edit_start_line_idx": 331
} | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
type TBinaryProtocol struct {
trans TRichTransport
origTransport TTransport
reader io.Reader
writer io.Writer
strictRead bool
strictWrite bool
buffer [64]byte
}
type TBinaryProtocolFactory struct {
strictRead bool
strictWrite bool
}
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
return NewTBinaryProtocol(t, false, true)
}
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
if et, ok := t.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(t)
}
p.reader = p.trans
p.writer = p.trans
return p
}
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
return NewTBinaryProtocolFactory(false, true)
}
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
}
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
}
/**
* Writing Methods
*/
func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
if p.strictWrite {
version := uint32(VERSION_1) | uint32(typeId)
e := p.WriteI32(int32(version))
if e != nil {
return e
}
e = p.WriteString(name)
if e != nil {
return e
}
e = p.WriteI32(seqId)
return e
} else {
e := p.WriteString(name)
if e != nil {
return e
}
e = p.WriteByte(int8(typeId))
if e != nil {
return e
}
e = p.WriteI32(seqId)
return e
}
}
func (p *TBinaryProtocol) WriteMessageEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteStructBegin(name string) error {
return nil
}
func (p *TBinaryProtocol) WriteStructEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
e := p.WriteByte(int8(typeId))
if e != nil {
return e
}
e = p.WriteI16(id)
return e
}
func (p *TBinaryProtocol) WriteFieldEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteFieldStop() error {
e := p.WriteByte(STOP)
return e
}
func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
e := p.WriteByte(int8(keyType))
if e != nil {
return e
}
e = p.WriteByte(int8(valueType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteMapEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
e := p.WriteByte(int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteListEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
e := p.WriteByte(int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteSetEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteBool(value bool) error {
if value {
return p.WriteByte(1)
}
return p.WriteByte(0)
}
func (p *TBinaryProtocol) WriteByte(value int8) error {
e := p.trans.WriteByte(byte(value))
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI16(value int16) error {
v := p.buffer[0:2]
binary.BigEndian.PutUint16(v, uint16(value))
_, e := p.writer.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI32(value int32) error {
v := p.buffer[0:4]
binary.BigEndian.PutUint32(v, uint32(value))
_, e := p.writer.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI64(value int64) error {
v := p.buffer[0:8]
binary.BigEndian.PutUint64(v, uint64(value))
_, err := p.writer.Write(v)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteDouble(value float64) error {
return p.WriteI64(int64(math.Float64bits(value)))
}
func (p *TBinaryProtocol) WriteString(value string) error {
e := p.WriteI32(int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.WriteString(value)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteBinary(value []byte) error {
e := p.WriteI32(int32(len(value)))
if e != nil {
return e
}
_, err := p.writer.Write(value)
return NewTProtocolException(err)
}
/**
* Reading methods
*/
func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
size, e := p.ReadI32()
if e != nil {
return "", typeId, 0, NewTProtocolException(e)
}
if size < 0 {
typeId = TMessageType(size & 0x0ff)
version := int64(int64(size) & VERSION_MASK)
if version != VERSION_1 {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
}
name, e = p.ReadString()
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
seqId, e = p.ReadI32()
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
return name, typeId, seqId, nil
}
if p.strictRead {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
}
name, e2 := p.readStringBody(size)
if e2 != nil {
return name, typeId, seqId, e2
}
b, e3 := p.ReadByte()
if e3 != nil {
return name, typeId, seqId, e3
}
typeId = TMessageType(b)
seqId, e4 := p.ReadI32()
if e4 != nil {
return name, typeId, seqId, e4
}
return name, typeId, seqId, nil
}
func (p *TBinaryProtocol) ReadMessageEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
return
}
func (p *TBinaryProtocol) ReadStructEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
t, err := p.ReadByte()
typeId = TType(t)
if err != nil {
return name, typeId, seqId, err
}
if t != STOP {
seqId, err = p.ReadI16()
}
return name, typeId, seqId, err
}
func (p *TBinaryProtocol) ReadFieldEnd() error {
return nil
}
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
k, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
kType = TType(k)
v, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
vType = TType(v)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return kType, vType, size, nil
}
func (p *TBinaryProtocol) ReadMapEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
b, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return
}
func (p *TBinaryProtocol) ReadListEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
b, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return elemType, size, nil
}
func (p *TBinaryProtocol) ReadSetEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadBool() (bool, error) {
b, e := p.ReadByte()
v := true
if b != 1 {
v = false
}
return v, e
}
func (p *TBinaryProtocol) ReadByte() (int8, error) {
v, err := p.trans.ReadByte()
return int8(v), err
}
func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
buf := p.buffer[0:2]
err = p.readAll(buf)
value = int16(binary.BigEndian.Uint16(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
buf := p.buffer[0:4]
err = p.readAll(buf)
value = int32(binary.BigEndian.Uint32(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
buf := p.buffer[0:8]
err = p.readAll(buf)
value = int64(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
buf := p.buffer[0:8]
err = p.readAll(buf)
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadString() (value string, err error) {
size, e := p.ReadI32()
if e != nil {
return "", e
}
if size < 0 {
err = invalidDataLength
return
}
return p.readStringBody(size)
}
func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
size, e := p.ReadI32()
if e != nil {
return nil, e
}
if size < 0 {
return nil, invalidDataLength
}
if uint64(size) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
isize := int(size)
buf := make([]byte, isize)
_, err := io.ReadFull(p.trans, buf)
return buf, NewTProtocolException(err)
}
func (p *TBinaryProtocol) Flush() (err error) {
return NewTProtocolException(p.trans.Flush())
}
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
}
func (p *TBinaryProtocol) Transport() TTransport {
return p.origTransport
}
func (p *TBinaryProtocol) readAll(buf []byte) error {
_, err := io.ReadFull(p.reader, buf)
return NewTProtocolException(err)
}
const readLimit = 32768
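// readStringBody reads the string payload in chunks of at most readLimit bytes so
// that a maliciously large claimed size cannot force a single huge allocation.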
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
if size < 0 {
return "", nil
}
if uint64(size) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
var (
buf bytes.Buffer
e error
b []byte
)
switch {
case int(size) <= len(p.buffer):
b = p.buffer[:size] // avoids allocation for small reads
case int(size) < readLimit:
b = make([]byte, size)
default:
b = make([]byte, readLimit)
}
for size > 0 {
_, e = io.ReadFull(p.trans, b)
buf.Write(b)
if e != nil {
break
}
size -= readLimit
if size < readLimit && size > 0 {
b = b[:size]
}
}
return buf.String(), NewTProtocolException(e)
}
| vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00998084619641304,
0.0017390100983902812,
0.0001681564172031358,
0.0009716777130961418,
0.001896852976642549
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// handleMsg is invoked whenever an inbound message is received from a remote\n",
"// peer. The remote connection is torn down upon returning any error.\n",
"func (pm *ProtocolManager) handleMsg(p *peer) error {\n",
"\t// Read the next message from the remote peer, and ensure it's fully consumed\n",
"\tmsg, err := p.rw.ReadMsg()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tselect {\n",
"\tcase err := <-p.errCh:\n",
"\t\treturn err\n",
"\tdefault:\n",
"\t}\n"
],
"file_path": "les/handler.go",
"type": "add",
"edit_start_line_idx": 331
} | package simulations
import (
"testing"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/rpc"
)
// NoopService is the service that does not do anything
// but implements node.Service interface.
type NoopService struct {
c map[enode.ID]chan struct{}
}
func NewNoopService(ackC map[enode.ID]chan struct{}) *NoopService {
return &NoopService{
c: ackC,
}
}
func (t *NoopService) Protocols() []p2p.Protocol {
return []p2p.Protocol{
{
Name: "noop",
Version: 666,
Length: 0,
Run: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
if t.c != nil {
t.c[peer.ID()] = make(chan struct{})
close(t.c[peer.ID()])
}
rw.ReadMsg()
return nil
},
NodeInfo: func() interface{} {
return struct{}{}
},
PeerInfo: func(id enode.ID) interface{} {
return struct{}{}
},
Attributes: []enr.Entry{},
},
}
}
func (t *NoopService) APIs() []rpc.API {
return []rpc.API{}
}
func (t *NoopService) Start(server *p2p.Server) error {
return nil
}
func (t *NoopService) Stop() error {
return nil
}
func VerifyRing(t *testing.T, net *Network, ids []enode.ID) {
t.Helper()
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := net.GetConn(ids[i], ids[j])
if i == j-1 || (i == 0 && j == n-1) {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}
func VerifyChain(t *testing.T, net *Network, ids []enode.ID) {
t.Helper()
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := net.GetConn(ids[i], ids[j])
if i == j-1 {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}
func VerifyFull(t *testing.T, net *Network, ids []enode.ID) {
t.Helper()
n := len(ids)
var connections int
for i, lid := range ids {
for _, rid := range ids[i+1:] {
if net.GetConn(lid, rid) != nil {
connections++
}
}
}
want := n * (n - 1) / 2
if connections != want {
t.Errorf("wrong number of connections, got: %v, want: %v", connections, want)
}
}
func VerifyStar(t *testing.T, net *Network, ids []enode.ID, centerIndex int) {
t.Helper()
n := len(ids)
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
c := net.GetConn(ids[i], ids[j])
if i == centerIndex || j == centerIndex {
if c == nil {
t.Errorf("nodes %v and %v are not connected, but they should be", i, j)
}
} else {
if c != nil {
t.Errorf("nodes %v and %v are connected, but they should not be", i, j)
}
}
}
}
}
| p2p/simulations/test.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.004846200812608004,
0.000841597851831466,
0.00016782147577032447,
0.00021358260710258037,
0.0015156746376305819
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"// handleMsg is invoked whenever an inbound message is received from a remote\n",
"// peer. The remote connection is torn down upon returning any error.\n",
"func (pm *ProtocolManager) handleMsg(p *peer) error {\n",
"\t// Read the next message from the remote peer, and ensure it's fully consumed\n",
"\tmsg, err := p.rw.ReadMsg()\n",
"\tif err != nil {\n",
"\t\treturn err\n",
"\t}\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tselect {\n",
"\tcase err := <-p.errCh:\n",
"\t\treturn err\n",
"\tdefault:\n",
"\t}\n"
],
"file_path": "les/handler.go",
"type": "add",
"edit_start_line_idx": 331
} | {{.CommentWithoutT "a"}}
func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
}
| vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00016756293189246207,
0.00016756293189246207,
0.00016756293189246207,
0.00016756293189246207,
0
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tbv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)\n",
"\t\tif reply != nil {\n",
"\t\t\tp.queueSend(func() {\n",
"\t\t\t\tif err := reply.send(bv); err != nil {\n",
"\t\t\t\t\tp.errCh <- err\n",
"\t\t\t\t}\n",
"\t\t\t})\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tselect {\n",
"\t\t\t\t\tcase p.errCh <- err:\n",
"\t\t\t\t\tdefault:\n",
"\t\t\t\t\t}\n"
],
"file_path": "les/handler.go",
"type": "replace",
"edit_start_line_idx": 391
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"encoding/json"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
const (
softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
ethVersion = 63 // equivalent eth version for the downloader
MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
MaxBodyFetch = 32 // Amount of block bodies to be fetched per retrieval request
MaxReceiptFetch = 128 // Amount of transaction receipts to allow fetching per request
MaxCodeFetch = 64 // Amount of contract codes to allow fetching per request
MaxProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
MaxHelperTrieProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
MaxTxSend = 64 // Amount of transactions to be sent per request
MaxTxStatus = 256 // Amount of transactions to be queried per request
disableClientRemovePeer = false
)
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
type BlockChain interface {
Config() *params.ChainConfig
HasHeader(hash common.Hash, number uint64) bool
GetHeader(hash common.Hash, number uint64) *types.Header
GetHeaderByHash(hash common.Hash) *types.Header
CurrentHeader() *types.Header
GetTd(hash common.Hash, number uint64) *big.Int
StateCache() state.Database
InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error)
Rollback(chain []common.Hash)
GetHeaderByNumber(number uint64) *types.Header
GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64)
Genesis() *types.Block
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
type txPool interface {
AddRemotes(txs []*types.Transaction) []error
Status(hashes []common.Hash) []core.TxStatus
}
type ProtocolManager struct {
lightSync bool
txpool txPool
txrelay *LesTxRelay
networkId uint64
chainConfig *params.ChainConfig
iConfig *light.IndexerConfig
blockchain BlockChain
chainDb ethdb.Database
odr *LesOdr
server *LesServer
serverPool *serverPool
lesTopic discv5.Topic
reqDist *requestDistributor
retriever *retrieveManager
servingQueue *servingQueue
downloader *downloader.Downloader
fetcher *lightFetcher
peers *peerSet
maxPeers int
eventMux *event.TypeMux
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
quitSync chan struct{}
noMorePeers chan struct{}
// wait group is used for graceful shutdowns during downloading
// and processing
wg *sync.WaitGroup
ulc *ulc
}
// NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
// with the ethereum network.
func NewProtocolManager(
chainConfig *params.ChainConfig,
indexerConfig *light.IndexerConfig,
lightSync bool,
networkId uint64,
mux *event.TypeMux,
engine consensus.Engine,
peers *peerSet,
blockchain BlockChain,
txpool txPool,
chainDb ethdb.Database,
odr *LesOdr,
txrelay *LesTxRelay,
serverPool *serverPool,
quitSync chan struct{},
wg *sync.WaitGroup,
ulcConfig *eth.ULCConfig) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
lightSync: lightSync,
eventMux: mux,
blockchain: blockchain,
chainConfig: chainConfig,
iConfig: indexerConfig,
chainDb: chainDb,
odr: odr,
networkId: networkId,
txpool: txpool,
txrelay: txrelay,
serverPool: serverPool,
peers: peers,
newPeerCh: make(chan *peer),
quitSync: quitSync,
wg: wg,
noMorePeers: make(chan struct{}),
}
if odr != nil {
manager.retriever = odr.retriever
manager.reqDist = odr.retriever.dist
} else {
manager.servingQueue = newServingQueue(int64(time.Millisecond * 10))
}
if ulcConfig != nil {
manager.ulc = newULC(ulcConfig)
}
removePeer := manager.removePeer
if disableClientRemovePeer {
removePeer = func(id string) {}
}
if lightSync {
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, nil, blockchain, removePeer)
manager.peers.notify((*downloaderPeerNotify)(manager))
manager.fetcher = newLightFetcher(manager)
}
return manager, nil
}
// removePeer initiates disconnection from a peer by removing it from the peer set
func (pm *ProtocolManager) removePeer(id string) {
pm.peers.Unregister(id)
}
func (pm *ProtocolManager) Start(maxPeers int) {
pm.maxPeers = maxPeers
if pm.lightSync {
go pm.syncer()
} else {
go func() {
for range pm.newPeerCh {
}
}()
}
}
func (pm *ProtocolManager) Stop() {
// Showing a log message. During download / process this could actually
// take between 5 and 10 seconds and therefore feedback is required.
log.Info("Stopping light Ethereum protocol")
// Quit the sync loop.
// After this send has completed, no new peers will be accepted.
pm.noMorePeers <- struct{}{}
close(pm.quitSync) // quits syncer, fetcher
if pm.servingQueue != nil {
pm.servingQueue.stop()
}
// Disconnect existing sessions.
// This also closes the gate for any new registrations on the peer set.
// sessions which are already established but not added to pm.peers yet
// will exit when they try to register.
pm.peers.Close()
// Wait for any process action
pm.wg.Wait()
log.Info("Light Ethereum protocol stopped")
}
// runPeer is the p2p protocol run function for the given version.
func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
var entry *poolEntry
peer := pm.newPeer(int(version), pm.networkId, p, rw)
if pm.serverPool != nil {
entry = pm.serverPool.connect(peer, peer.Node())
}
peer.poolEntry = entry
select {
case pm.newPeerCh <- peer:
pm.wg.Add(1)
defer pm.wg.Done()
err := pm.handle(peer)
if entry != nil {
pm.serverPool.disconnect(entry)
}
return err
case <-pm.quitSync:
if entry != nil {
pm.serverPool.disconnect(entry)
}
return p2p.DiscQuitting
}
}
func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
var isTrusted bool
if pm.isULCEnabled() {
isTrusted = pm.ulc.isTrusted(p.ID())
}
return newPeer(pv, nv, isTrusted, p, newMeteredMsgWriter(rw))
}
// handle is the callback invoked to manage the life cycle of a les peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
// Ignore maxPeers if this is a trusted peer
// In server mode we try to check into the client pool after handshake
if pm.lightSync && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers
}
p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
// Execute the LES handshake
var (
genesis = pm.blockchain.Genesis()
head = pm.blockchain.CurrentHeader()
hash = head.Hash()
number = head.Number.Uint64()
td = pm.blockchain.GetTd(hash, number)
)
if err := p.Handshake(td, hash, number, genesis.Hash(), pm.server); err != nil {
p.Log().Debug("Light Ethereum handshake failed", "err", err)
return err
}
if p.fcClient != nil {
defer p.fcClient.Disconnect()
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
// Register the peer locally
if err := pm.peers.Register(p); err != nil {
p.Log().Error("Light Ethereum peer registration failed", "err", err)
return err
}
defer func() {
pm.removePeer(p.id)
}()
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if pm.lightSync {
p.lock.Lock()
head := p.headInfo
p.lock.Unlock()
if pm.fetcher != nil {
pm.fetcher.announce(p, head)
}
if p.poolEntry != nil {
pm.serverPool.registered(p.poolEntry)
}
}
// Main loop: handle incoming messages.
for {
if err := pm.handleMsg(p); err != nil {
p.Log().Debug("Light Ethereum message handling failed", "err", err)
if p.fcServer != nil {
p.fcServer.DumpLogs()
}
return err
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
p.responseCount++
responseCount := p.responseCount
var (
maxCost uint64
task *servingTask
)
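// accept checks an incoming request against the peer's flow control state: it
// rejects empty or over-limit requests, computes the maximum cost of the reply,
// asks the flow control client node to accept that cost and, if accepted,
// creates a serving task with the returned priority. It reports whether the
// task could be started.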
accept := func(reqID, reqCnt, maxCnt uint64) bool {
if reqCnt == 0 {
return false
}
if p.fcClient == nil || reqCnt > maxCnt {
return false
}
maxCost = p.fcCosts.getCost(msg.Code, reqCnt)
if accepted, bufShort, servingPriority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost); !accepted {
if bufShort > 0 {
p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
}
return false
} else {
task = pm.servingQueue.newTask(servingPriority)
}
return task.start()
}
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
defer msg.Discard()
var deliverMsg *Msg
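// sendResponse charges the peer for the served request: it computes the realized
// cost (falling back to the pre-estimated maximum cost when no cost tracker is
// available), updates the client's flow control buffer and, if a reply is given,
// queues it for sending together with the updated buffer value.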
sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) {
p.responseLock.Lock()
defer p.responseLock.Unlock()
var replySize uint32
if reply != nil {
replySize = reply.size()
}
var realCost uint64
if pm.server.costTracker != nil {
realCost = pm.server.costTracker.realCost(servingTime, msg.Size, replySize)
pm.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
} else {
realCost = maxCost
}
bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
if reply != nil {
p.queueSend(func() {
if err := reply.send(bv); err != nil {
p.errCh <- err
}
})
}
}
// Handle the message depending on its contents
switch msg.Code {
case StatusMsg:
p.Log().Trace("Received status message")
// Status messages should never arrive after the handshake
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
// Block header query, collect the requested headers and reply
case AnnounceMsg:
p.Log().Trace("Received announce message")
var req announceData
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
update, size := req.Update.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
p.updateFlowControl(update)
if req.Hash != (common.Hash{}) {
if p.announceType == announceTypeNone {
return errResp(ErrUnexpectedResponse, "")
}
if p.announceType == announceTypeSigned {
if err := req.checkSignature(p.ID(), update); err != nil {
p.Log().Trace("Invalid announcement signature", "err", err)
return err
}
p.Log().Trace("Valid announcement signature")
}
p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth)
if pm.fetcher != nil {
pm.fetcher.announce(p, &req)
}
}
case GetBlockHeadersMsg:
p.Log().Trace("Received block header request")
// Decode the complex header query
var req struct {
ReqID uint64
Query getBlockHeadersData
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
query := req.Query
if !accept(req.ReqID, query.Amount, MaxHeaderFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
hashMode := query.Origin.Hash != (common.Hash{})
first := true
maxNonCanonical := uint64(100)
// Gather headers until the fetch or network limits are reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
if !first && !task.waitOrStop() {
return
}
// Retrieve the next header satisfying the query
var origin *types.Header
if hashMode {
if first {
origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
if origin != nil {
query.Origin.Number = origin.Number.Uint64()
}
} else {
origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
}
} else {
origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
headers = append(headers, origin)
bytes += estHeaderRlpSize
// Advance to the next header of the query
switch {
case hashMode && query.Reverse:
// Hash based traversal towards the genesis block
ancestor := query.Skip + 1
if ancestor == 0 {
unknown = true
} else {
query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
unknown = (query.Origin.Hash == common.Hash{})
}
case hashMode && !query.Reverse:
// Hash based traversal towards the leaf block
var (
current = origin.Number.Uint64()
next = current + query.Skip + 1
)
if next <= current {
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
unknown = true
} else {
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
nextHash := header.Hash()
expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
if expOldHash == query.Origin.Hash {
query.Origin.Hash, query.Origin.Number = nextHash, next
} else {
unknown = true
}
} else {
unknown = true
}
}
case query.Reverse:
// Number based traversal towards the genesis block
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= query.Skip + 1
} else {
unknown = true
}
case !query.Reverse:
// Number based traversal towards the leaf block
query.Origin.Number += query.Skip + 1
}
first = false
}
sendResponse(req.ReqID, query.Amount, p.ReplyBlockHeaders(req.ReqID, headers), task.done())
}()
case BlockHeadersMsg:
if pm.downloader == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received block header response message")
// A batch of headers arrived in response to one of our previous requests
var resp struct {
ReqID, BV uint64
Headers []*types.Header
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
if pm.fetcher != nil && pm.fetcher.requestedID(resp.ReqID) {
pm.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
} else {
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
if err != nil {
log.Debug(fmt.Sprint(err))
}
}
case GetBlockBodiesMsg:
p.Log().Trace("Received block bodies request")
// Decode the retrieval message
var req struct {
ReqID uint64
Hashes []common.Hash
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather blocks until the fetch or network limits are reached
var (
bytes int
bodies []rlp.RawValue
)
reqCnt := len(req.Hashes)
if !accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, hash := range req.Hashes {
if i != 0 && !task.waitOrStop() {
return
}
if bytes >= softResponseLimit {
break
}
// Retrieve the requested block body, stopping if enough was found
if number := rawdb.ReadHeaderNumber(pm.chainDb, hash); number != nil {
if data := rawdb.ReadBodyRLP(pm.chainDb, hash, *number); len(data) != 0 {
bodies = append(bodies, data)
bytes += len(data)
}
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyBlockBodiesRLP(req.ReqID, bodies), task.done())
}()
case BlockBodiesMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received block bodies response")
// A batch of block bodies arrived in response to one of our previous requests
var resp struct {
ReqID, BV uint64
Data []*types.Body
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgBlockBodies,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case GetCodeMsg:
p.Log().Trace("Received code request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []CodeReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
bytes int
data [][]byte
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
if number == nil {
p.Log().Warn("Failed to retrieve block num for code", "hash", req.BHash)
continue
}
header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
if header == nil {
p.Log().Warn("Failed to retrieve header for code", "block", *number, "hash", req.BHash)
continue
}
triedb := pm.blockchain.StateCache().TrieDB()
account, err := pm.getAccount(triedb, header.Root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
code, err := triedb.Node(common.BytesToHash(account.CodeHash))
if err != nil {
p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err)
continue
}
// Accumulate the code and abort if enough data was retrieved
data = append(data, code)
if bytes += len(code); bytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyCode(req.ReqID, data), task.done())
}()
case CodeMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received code response")
// A batch of node state data arrived in response to one of our previous requests
var resp struct {
ReqID, BV uint64
Data [][]byte
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgCode,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case GetReceiptsMsg:
p.Log().Trace("Received receipts request")
// Decode the retrieval message
var req struct {
ReqID uint64
Hashes []common.Hash
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
bytes int
receipts []rlp.RawValue
)
reqCnt := len(req.Hashes)
if !accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, hash := range req.Hashes {
if i != 0 && !task.waitOrStop() {
return
}
if bytes >= softResponseLimit {
break
}
// Retrieve the requested block's receipts, skipping if unknown to us
var results types.Receipts
if number := rawdb.ReadHeaderNumber(pm.chainDb, hash); number != nil {
results = rawdb.ReadReceipts(pm.chainDb, hash, *number)
}
if results == nil {
if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
continue
}
}
// If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil {
log.Error("Failed to encode receipt", "err", err)
} else {
receipts = append(receipts, encoded)
bytes += len(encoded)
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyReceiptsRLP(req.ReqID, receipts), task.done())
}()
case ReceiptsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received receipts response")
// A batch of receipts arrived in response to one of our previous requests
var resp struct {
ReqID, BV uint64
Receipts []types.Receipts
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgReceipts,
ReqID: resp.ReqID,
Obj: resp.Receipts,
}
case GetProofsV1Msg:
p.Log().Trace("Received proofs request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ProofReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
bytes int
proofs proofsData
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
if number == nil {
p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
continue
}
header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
if header == nil {
p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
continue
}
// Open the account or storage trie for the request
statedb := pm.blockchain.StateCache()
var trie state.Trie
switch len(req.AccKey) {
case 0:
// No account key specified, open an account trie
trie, err = statedb.OpenTrie(header.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", header.Root, "err", err)
continue
}
default:
// Account key specified, open a storage trie
account, err := pm.getAccount(statedb.TrieDB(), header.Root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
continue
}
}
// Prove the user's request from the account or storage trie
var proof light.NodeList
if err := trie.Prove(req.Key, 0, &proof); err != nil {
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
continue
}
proofs = append(proofs, proof)
if bytes += proof.DataSize(); bytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofs(req.ReqID, proofs), task.done())
}()
case GetProofsV2Msg:
p.Log().Trace("Received les/2 proofs request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ProofReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
lastBHash common.Hash
root common.Hash
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
nodes := light.NewNodeSet()
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
var (
number *uint64
header *types.Header
trie state.Trie
)
if req.BHash != lastBHash {
root, lastBHash = common.Hash{}, req.BHash
if number = rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number == nil {
p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
continue
}
if header = rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header == nil {
p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
continue
}
root = header.Root
}
// Open the account or storage trie for the request
statedb := pm.blockchain.StateCache()
switch len(req.AccKey) {
case 0:
// No account key specified, open an account trie
trie, err = statedb.OpenTrie(root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
continue
}
default:
// Account key specified, open a storage trie
account, err := pm.getAccount(statedb.TrieDB(), root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
continue
}
}
// Prove the user's request from the account or storage trie
if err := trie.Prove(req.Key, req.FromLevel, nodes); err != nil {
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
continue
}
if nodes.DataSize() >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofsV2(req.ReqID, nodes.NodeList()), task.done())
}()
case ProofsV1Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received proofs response")
// A batch of merkle proofs arrived in response to one of our previous requests
var resp struct {
ReqID, BV uint64
Data []light.NodeList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgProofsV1,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case ProofsV2Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received les/2 proofs response")
// A batch of merkle proofs arrived in response to one of our previous requests
var resp struct {
ReqID, BV uint64
Data light.NodeList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgProofsV2,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case GetHeaderProofsMsg:
p.Log().Trace("Received headers proof request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ChtReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
bytes int
proofs []ChtResp
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix))
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1)
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
trie, err := trie.New(root, trieDb)
if err != nil {
continue
}
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], req.BlockNum)
var proof light.NodeList
trie.Prove(encNumber[:], 0, &proof)
proofs = append(proofs, ChtResp{Header: header, Proof: proof})
if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
break
}
}
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHeaderProofs(req.ReqID, proofs), task.done())
}()
case GetHelperTrieProofsMsg:
p.Log().Trace("Received helper trie proof request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []HelperTrieReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limits are reached
var (
auxBytes int
auxData [][]byte
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
var (
lastIdx uint64
lastType uint
root common.Hash
auxTrie *trie.Trie
)
nodes := light.NewNodeSet()
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
if auxTrie == nil || req.Type != lastType || req.TrieIdx != lastIdx {
auxTrie, lastType, lastIdx = nil, req.Type, req.TrieIdx
var prefix string
if root, prefix = pm.getHelperTrie(req.Type, req.TrieIdx); root != (common.Hash{}) {
auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(pm.chainDb, prefix)))
}
}
if req.AuxReq == auxRoot {
var data []byte
if root != (common.Hash{}) {
data = root[:]
}
auxData = append(auxData, data)
auxBytes += len(data)
} else {
if auxTrie != nil {
auxTrie.Prove(req.Key, req.FromLevel, nodes)
}
if req.AuxReq != 0 {
data := pm.getHelperTrieAuxData(req)
auxData = append(auxData, data)
auxBytes += len(data)
}
}
if nodes.DataSize()+auxBytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}), task.done())
}()
case HeaderProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received headers proof response")
var resp struct {
ReqID, BV uint64
Data []ChtResp
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgHeaderProofs,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case HelperTrieProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received helper trie proof response")
var resp struct {
ReqID, BV uint64
Data HelperTrieResps
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgHelperTrieProofs,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case SendTxMsg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")
}
// Transactions arrived, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(txs)
if !accept(0, uint64(reqCnt), MaxTxSend) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, tx := range txs {
if i != 0 && !task.waitOrStop() {
return
}
pm.txpool.AddRemotes([]*types.Transaction{tx})
}
sendResponse(0, uint64(reqCnt), nil, task.done())
}()
case SendTxV2Msg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")
}
// Transactions arrived, parse all of them and deliver to the pool
var req struct {
ReqID uint64
Txs []*types.Transaction
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(req.Txs)
if !accept(req.ReqID, uint64(reqCnt), MaxTxSend) {
return errResp(ErrRequestRejected, "")
}
go func() {
stats := make([]txStatus, len(req.Txs))
for i, tx := range req.Txs {
if i != 0 && !task.waitOrStop() {
return
}
hash := tx.Hash()
stats[i] = pm.txStatus(hash)
if stats[i].Status == core.TxStatusUnknown {
if errs := pm.txpool.AddRemotes([]*types.Transaction{tx}); errs[0] != nil {
stats[i].Error = errs[0].Error()
continue
}
stats[i] = pm.txStatus(hash)
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyTxStatus(req.ReqID, stats), task.done())
}()
case GetTxStatusMsg:
if pm.txpool == nil {
return errResp(ErrUnexpectedResponse, "")
}
// Transaction status query arrived, decode the hashes and reply with their current status
var req struct {
ReqID uint64
Hashes []common.Hash
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(req.Hashes)
if !accept(req.ReqID, uint64(reqCnt), MaxTxStatus) {
return errResp(ErrRequestRejected, "")
}
go func() {
stats := make([]txStatus, len(req.Hashes))
for i, hash := range req.Hashes {
if i != 0 && !task.waitOrStop() {
return
}
stats[i] = pm.txStatus(hash)
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyTxStatus(req.ReqID, stats), task.done())
}()
case TxStatusMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received tx status response")
var resp struct {
ReqID, BV uint64
Status []txStatus
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
default:
p.Log().Trace("Received unknown message", "code", msg.Code)
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
if deliverMsg != nil {
err := pm.retriever.deliver(p, deliverMsg)
if err != nil {
p.responseErrors++
if p.responseErrors > maxResponseErrors {
return err
}
}
}
return nil
}
// getAccount retrieves the account with the given address hash from the state trie rooted at root.
func (pm *ProtocolManager) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
trie, err := trie.New(root, triedb)
if err != nil {
return state.Account{}, err
}
blob, err := trie.TryGet(hash[:])
if err != nil {
return state.Account{}, err
}
var account state.Account
if err = rlp.DecodeBytes(blob, &account); err != nil {
return state.Account{}, err
}
return account, nil
}
// getHelperTrie returns the post-processed trie root for the given trie ID and section index
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
switch id {
case htCanonical:
idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1)
return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix
case htBloomBits:
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1)
return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
}
return common.Hash{}, ""
}
// getHelperTrieAuxData returns requested auxiliary data for the given HelperTrie request
func (pm *ProtocolManager) getHelperTrieAuxData(req HelperTrieReq) []byte {
if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 {
blockNum := binary.BigEndian.Uint64(req.Key)
hash := rawdb.ReadCanonicalHash(pm.chainDb, blockNum)
return rawdb.ReadHeaderRLP(pm.chainDb, hash, blockNum)
}
return nil
}
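// txStatus returns the status of the given transaction as reported by the
// transaction pool, falling back to a lookup in the local chain database when
// the pool does not know the transaction.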
func (pm *ProtocolManager) txStatus(hash common.Hash) txStatus {
var stat txStatus
stat.Status = pm.txpool.Status([]common.Hash{hash})[0]
// If the transaction is unknown to the pool, try looking it up locally
if stat.Status == core.TxStatusUnknown {
if tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(pm.chainDb, hash); tx != nil {
stat.Status = core.TxStatusIncluded
stat.Lookup = &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
}
}
return stat
}
// isULCEnabled returns true if we can use ULC
func (pm *ProtocolManager) isULCEnabled() bool {
if pm.ulc == nil || len(pm.ulc.trustedKeys) == 0 {
return false
}
return true
}
// downloaderPeerNotify implements peerSetNotify
type downloaderPeerNotify ProtocolManager
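// peerConnection adapts a les peer to the downloader peer interface so that it
// can be registered as a light peer and serve header retrieval requests.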
type peerConnection struct {
manager *ProtocolManager
peer *peer
}
func (pc *peerConnection) Head() (common.Hash, *big.Int) {
return pc.peer.HeadAndTd()
}
func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
reqID := genReqID()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
peer := dp.(*peer)
return peer.GetRequestCost(GetBlockHeadersMsg, amount)
},
canSend: func(dp distPeer) bool {
return dp.(*peer) == pc.peer
},
request: func(dp distPeer) func() {
peer := dp.(*peer)
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount)
peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse) }
},
}
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return light.ErrNoPeers
}
return nil
}
func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
reqID := genReqID()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
peer := dp.(*peer)
return peer.GetRequestCost(GetBlockHeadersMsg, amount)
},
canSend: func(dp distPeer) bool {
return dp.(*peer) == pc.peer
},
request: func(dp distPeer) func() {
peer := dp.(*peer)
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount)
peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse) }
},
}
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return light.ErrNoPeers
}
return nil
}
func (d *downloaderPeerNotify) registerPeer(p *peer) {
pm := (*ProtocolManager)(d)
pc := &peerConnection{
manager: pm,
peer: p,
}
pm.downloader.RegisterLightPeer(p.id, ethVersion, pc)
}
func (d *downloaderPeerNotify) unregisterPeer(p *peer) {
pm := (*ProtocolManager)(d)
pm.downloader.UnregisterPeer(p.id)
}
| les/handler.go | 1 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.997778594493866,
0.015224715694785118,
0.00016367588250432163,
0.00017348524124827236,
0.11958920210599899
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tbv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)\n",
"\t\tif reply != nil {\n",
"\t\t\tp.queueSend(func() {\n",
"\t\t\t\tif err := reply.send(bv); err != nil {\n",
"\t\t\t\t\tp.errCh <- err\n",
"\t\t\t\t}\n",
"\t\t\t})\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tselect {\n",
"\t\t\t\t\tcase p.errCh <- err:\n",
"\t\t\t\t\tdefault:\n",
"\t\t\t\t\t}\n"
],
"file_path": "les/handler.go",
"type": "replace",
"edit_start_line_idx": 391
} | # XHandler
[](https://godoc.org/github.com/rs/xhandler) [](https://raw.githubusercontent.com/rs/xhandler/master/LICENSE) [](https://travis-ci.org/rs/xhandler) [](http://gocover.io/github.com/rs/xhandler)
XHandler is a bridge between [net/context](https://godoc.org/golang.org/x/net/context) and `http.Handler`.
It lets you enforce `net/context` in your handlers without sacrificing compatibility with existing `http.Handlers` nor imposing a specific router.
Thanks to `net/context` deadline management, `xhandler` is able to enforce a per request deadline and will cancel the context when the client closes the connection unexpectedly.
You may create your own `net/context` aware handler pretty much the same way as you would do with http.Handler.
Read more about xhandler on [Dailymotion engineering blog](http://engineering.dailymotion.com/our-way-to-go/).
## Installing
go get -u github.com/rs/xhandler
## Usage
```go
package main
import (
"log"
"net/http"
"time"
"github.com/rs/cors"
"github.com/rs/xhandler"
"golang.org/x/net/context"
)
type myMiddleware struct {
next xhandler.HandlerC
}
func (h myMiddleware) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) {
ctx = context.WithValue(ctx, "test", "World")
h.next.ServeHTTPC(ctx, w, r)
}
func main() {
c := xhandler.Chain{}
// Add close notifier handler so context is cancelled when the client closes
// the connection
c.UseC(xhandler.CloseHandler)
// Add timeout handler
c.UseC(xhandler.TimeoutHandler(2 * time.Second))
// Middleware putting something in the context
c.UseC(func(next xhandler.HandlerC) xhandler.HandlerC {
return myMiddleware{next: next}
})
// Mix it with a non-context-aware middleware handler
c.Use(cors.Default().Handler)
// Final handler (using handlerFuncC), reading from the context
xh := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
value := ctx.Value("test").(string)
w.Write([]byte("Hello " + value))
})
// Bridge context aware handlers with http.Handler using xhandler.Handle()
http.Handle("/test", c.Handler(xh))
if err := http.ListenAndServe(":8080", nil); err != nil {
log.Fatal(err)
}
}
```
### Using xmux
Xhandler comes with an optional context aware [muxer](https://github.com/rs/xmux) forked from [httprouter](https://github.com/julienschmidt/httprouter):
```go
package main
import (
"fmt"
"log"
"net/http"
"time"
"github.com/rs/xhandler"
"github.com/rs/xmux"
"golang.org/x/net/context"
)
func main() {
c := xhandler.Chain{}
// Append a context-aware middleware handler
c.UseC(xhandler.CloseHandler)
// Another context-aware middleware handler
c.UseC(xhandler.TimeoutHandler(2 * time.Second))
mux := xmux.New()
// Use c.Handler to terminate the chain with your final handler
mux.GET("/welcome/:name", xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) {
fmt.Fprintf(w, "Welcome %s!", xmux.Params(ctx).Get("name"))
}))
if err := http.ListenAndServe(":8080", c.Handler(mux)); err != nil {
log.Fatal(err)
}
}
```
See [xmux](https://github.com/rs/xmux) for more examples.
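In both examples the chain installs `xhandler.CloseHandler` and `xhandler.TimeoutHandler`, so the context received by the final handler is cancelled when the deadline expires or the client drops the connection. A handler can watch `ctx.Done()` to stop work early; here is a minimal sketch (`c` being the chain built in either example above, with the route name and the 5 second sleep standing in for real work purely for illustration):

```go
slow := xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	select {
	case <-time.After(5 * time.Second): // stand-in for real work
		w.Write([]byte("finished"))
	case <-ctx.Done():
		// Deadline exceeded or client went away: give up early.
		http.Error(w, ctx.Err().Error(), http.StatusServiceUnavailable)
	}
})
http.Handle("/slow", c.Handler(slow))
```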
## Context Aware Middleware
Here is a list of `net/context` aware middleware handlers implementing `xhandler.HandlerC` interface.
Feel free to put up a PR linking your middleware if you have built one:
| Middleware | Author | Description |
| ---------- | ------ | ----------- |
| [xmux](https://github.com/rs/xmux) | [Olivier Poitrey](https://github.com/rs) | HTTP request muxer |
| [xlog](https://github.com/rs/xlog) | [Olivier Poitrey](https://github.com/rs) | HTTP handler logger |
| [xstats](https://github.com/rs/xstats) | [Olivier Poitrey](https://github.com/rs) | A generic client for service instrumentation |
| [xaccess](https://github.com/rs/xaccess) | [Olivier Poitrey](https://github.com/rs) | HTTP handler access logger with [xlog](https://github.com/rs/xlog) and [xstats](https://github.com/rs/xstats) |
| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support |
## Licenses
All source code is licensed under the [MIT License](https://raw.github.com/rs/xhandler/master/LICENSE).
| vendor/github.com/rs/xhandler/README.md | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00017635962285567075,
0.00016841075557749718,
0.00015881162835285068,
0.00016761012375354767,
0.000004695850748248631
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tbv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)\n",
"\t\tif reply != nil {\n",
"\t\t\tp.queueSend(func() {\n",
"\t\t\t\tif err := reply.send(bv); err != nil {\n",
"\t\t\t\t\tp.errCh <- err\n",
"\t\t\t\t}\n",
"\t\t\t})\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tselect {\n",
"\t\t\t\t\tcase p.errCh <- err:\n",
"\t\t\t\t\tdefault:\n",
"\t\t\t\t\t}\n"
],
"file_path": "les/handler.go",
"type": "replace",
"edit_start_line_idx": 391
} | package packer
import (
"fmt"
"math"
"reflect"
"strings"
"github.com/graph-gophers/graphql-go/errors"
"github.com/graph-gophers/graphql-go/internal/common"
"github.com/graph-gophers/graphql-go/internal/schema"
)
type packer interface {
Pack(value interface{}) (reflect.Value, error)
}
type Builder struct {
packerMap map[typePair]*packerMapEntry
structPackers []*StructPacker
}
type typePair struct {
graphQLType common.Type
resolverType reflect.Type
}
type packerMapEntry struct {
packer packer
targets []*packer
}
func NewBuilder() *Builder {
return &Builder{
packerMap: make(map[typePair]*packerMapEntry),
}
}
func (b *Builder) Finish() error {
for _, entry := range b.packerMap {
for _, target := range entry.targets {
*target = entry.packer
}
}
for _, p := range b.structPackers {
p.defaultStruct = reflect.New(p.structType).Elem()
for _, f := range p.fields {
if defaultVal := f.field.Default; defaultVal != nil {
v, err := f.fieldPacker.Pack(defaultVal.Value(nil))
if err != nil {
return err
}
p.defaultStruct.FieldByIndex(f.fieldIndex).Set(v)
}
}
}
return nil
}
func (b *Builder) assignPacker(target *packer, schemaType common.Type, reflectType reflect.Type) error {
k := typePair{schemaType, reflectType}
ref, ok := b.packerMap[k]
if !ok {
ref = &packerMapEntry{}
b.packerMap[k] = ref
var err error
ref.packer, err = b.makePacker(schemaType, reflectType)
if err != nil {
return err
}
}
ref.targets = append(ref.targets, target)
return nil
}
func (b *Builder) makePacker(schemaType common.Type, reflectType reflect.Type) (packer, error) {
t, nonNull := unwrapNonNull(schemaType)
if !nonNull {
if reflectType.Kind() != reflect.Ptr {
return nil, fmt.Errorf("%s is not a pointer", reflectType)
}
elemType := reflectType.Elem()
addPtr := true
if _, ok := t.(*schema.InputObject); ok {
elemType = reflectType // keep pointer for input objects
addPtr = false
}
elem, err := b.makeNonNullPacker(t, elemType)
if err != nil {
return nil, err
}
return &nullPacker{
elemPacker: elem,
valueType: reflectType,
addPtr: addPtr,
}, nil
}
return b.makeNonNullPacker(t, reflectType)
}
func (b *Builder) makeNonNullPacker(schemaType common.Type, reflectType reflect.Type) (packer, error) {
if u, ok := reflect.New(reflectType).Interface().(Unmarshaler); ok {
if !u.ImplementsGraphQLType(schemaType.String()) {
return nil, fmt.Errorf("can not unmarshal %s into %s", schemaType, reflectType)
}
return &unmarshalerPacker{
ValueType: reflectType,
}, nil
}
switch t := schemaType.(type) {
case *schema.Scalar:
return &ValuePacker{
ValueType: reflectType,
}, nil
case *schema.Enum:
if reflectType.Kind() != reflect.String {
return nil, fmt.Errorf("wrong type, expected %s", reflect.String)
}
return &ValuePacker{
ValueType: reflectType,
}, nil
case *schema.InputObject:
e, err := b.MakeStructPacker(t.Values, reflectType)
if err != nil {
return nil, err
}
return e, nil
case *common.List:
if reflectType.Kind() != reflect.Slice {
return nil, fmt.Errorf("expected slice, got %s", reflectType)
}
p := &listPacker{
sliceType: reflectType,
}
if err := b.assignPacker(&p.elem, t.OfType, reflectType.Elem()); err != nil {
return nil, err
}
return p, nil
case *schema.Object, *schema.Interface, *schema.Union:
return nil, fmt.Errorf("type of kind %s can not be used as input", t.Kind())
default:
panic("unreachable")
}
}
func (b *Builder) MakeStructPacker(values common.InputValueList, typ reflect.Type) (*StructPacker, error) {
structType := typ
usePtr := false
if typ.Kind() == reflect.Ptr {
structType = typ.Elem()
usePtr = true
}
if structType.Kind() != reflect.Struct {
return nil, fmt.Errorf("expected struct or pointer to struct, got %s", typ)
}
var fields []*structPackerField
for _, v := range values {
fe := &structPackerField{field: v}
fx := func(n string) bool {
return strings.EqualFold(stripUnderscore(n), stripUnderscore(v.Name.Name))
}
sf, ok := structType.FieldByNameFunc(fx)
if !ok {
return nil, fmt.Errorf("missing argument %q", v.Name)
}
if sf.PkgPath != "" {
return nil, fmt.Errorf("field %q must be exported", sf.Name)
}
fe.fieldIndex = sf.Index
ft := v.Type
if v.Default != nil {
ft, _ = unwrapNonNull(ft)
ft = &common.NonNull{OfType: ft}
}
if err := b.assignPacker(&fe.fieldPacker, ft, sf.Type); err != nil {
return nil, fmt.Errorf("field %q: %s", sf.Name, err)
}
fields = append(fields, fe)
}
p := &StructPacker{
structType: structType,
usePtr: usePtr,
fields: fields,
}
b.structPackers = append(b.structPackers, p)
return p, nil
}
type StructPacker struct {
structType reflect.Type
usePtr bool
defaultStruct reflect.Value
fields []*structPackerField
}
type structPackerField struct {
field *common.InputValue
fieldIndex []int
fieldPacker packer
}
func (p *StructPacker) Pack(value interface{}) (reflect.Value, error) {
if value == nil {
return reflect.Value{}, errors.Errorf("got null for non-null")
}
values := value.(map[string]interface{})
v := reflect.New(p.structType)
v.Elem().Set(p.defaultStruct)
for _, f := range p.fields {
if value, ok := values[f.field.Name.Name]; ok {
packed, err := f.fieldPacker.Pack(value)
if err != nil {
return reflect.Value{}, err
}
v.Elem().FieldByIndex(f.fieldIndex).Set(packed)
}
}
if !p.usePtr {
return v.Elem(), nil
}
return v, nil
}
type listPacker struct {
sliceType reflect.Type
elem packer
}
func (e *listPacker) Pack(value interface{}) (reflect.Value, error) {
list, ok := value.([]interface{})
if !ok {
list = []interface{}{value}
}
v := reflect.MakeSlice(e.sliceType, len(list), len(list))
for i := range list {
packed, err := e.elem.Pack(list[i])
if err != nil {
return reflect.Value{}, err
}
v.Index(i).Set(packed)
}
return v, nil
}
type nullPacker struct {
elemPacker packer
valueType reflect.Type
addPtr bool
}
func (p *nullPacker) Pack(value interface{}) (reflect.Value, error) {
if value == nil {
return reflect.Zero(p.valueType), nil
}
v, err := p.elemPacker.Pack(value)
if err != nil {
return reflect.Value{}, err
}
if p.addPtr {
ptr := reflect.New(p.valueType.Elem())
ptr.Elem().Set(v)
return ptr, nil
}
return v, nil
}
type ValuePacker struct {
ValueType reflect.Type
}
func (p *ValuePacker) Pack(value interface{}) (reflect.Value, error) {
if value == nil {
return reflect.Value{}, errors.Errorf("got null for non-null")
}
coerced, err := unmarshalInput(p.ValueType, value)
if err != nil {
return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err)
}
return reflect.ValueOf(coerced), nil
}
type unmarshalerPacker struct {
ValueType reflect.Type
}
func (p *unmarshalerPacker) Pack(value interface{}) (reflect.Value, error) {
if value == nil {
return reflect.Value{}, errors.Errorf("got null for non-null")
}
v := reflect.New(p.ValueType)
if err := v.Interface().(Unmarshaler).UnmarshalGraphQL(value); err != nil {
return reflect.Value{}, err
}
return v.Elem(), nil
}
type Unmarshaler interface {
ImplementsGraphQLType(name string) bool
UnmarshalGraphQL(input interface{}) error
}
func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) {
if reflect.TypeOf(input) == typ {
return input, nil
}
switch typ.Kind() {
case reflect.Int32:
switch input := input.(type) {
case int:
if input < math.MinInt32 || input > math.MaxInt32 {
return nil, fmt.Errorf("not a 32-bit integer")
}
return int32(input), nil
case float64:
coerced := int32(input)
if input < math.MinInt32 || input > math.MaxInt32 || float64(coerced) != input {
return nil, fmt.Errorf("not a 32-bit integer")
}
return coerced, nil
}
case reflect.Float64:
switch input := input.(type) {
case int32:
return float64(input), nil
case int:
return float64(input), nil
}
case reflect.String:
if reflect.TypeOf(input).ConvertibleTo(typ) {
return reflect.ValueOf(input).Convert(typ).Interface(), nil
}
}
return nil, fmt.Errorf("incompatible type")
}
func unwrapNonNull(t common.Type) (common.Type, bool) {
if nn, ok := t.(*common.NonNull); ok {
return nn.OfType, true
}
return t, false
}
func stripUnderscore(s string) string {
return strings.Replace(s, "_", "", -1)
}
| vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.002329179085791111,
0.00035703295725397766,
0.00016029694234021008,
0.00017419364303350449,
0.00041969234007410705
] |
{
"id": 1,
"code_window": [
"\t\t}\n",
"\t\tbv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)\n",
"\t\tif reply != nil {\n",
"\t\t\tp.queueSend(func() {\n",
"\t\t\t\tif err := reply.send(bv); err != nil {\n",
"\t\t\t\t\tp.errCh <- err\n",
"\t\t\t\t}\n",
"\t\t\t})\n",
"\t\t}\n",
"\t}\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t\tselect {\n",
"\t\t\t\t\tcase p.errCh <- err:\n",
"\t\t\t\t\tdefault:\n",
"\t\t\t\t\t}\n"
],
"file_path": "les/handler.go",
"type": "replace",
"edit_start_line_idx": 391
} | package gosigar
import (
"time"
)
type ConcreteSigar struct{}
func (c *ConcreteSigar) CollectCpuStats(collectionInterval time.Duration) (<-chan Cpu, chan<- struct{}) {
// samplesCh is buffered to 1 value to immediately return first CPU sample
samplesCh := make(chan Cpu, 1)
stopCh := make(chan struct{})
go func() {
var cpuUsage Cpu
// Immediately provide non-delta value.
// samplesCh is buffered to 1 value, so it will not block.
cpuUsage.Get()
samplesCh <- cpuUsage
ticker := time.NewTicker(collectionInterval)
for {
select {
case <-ticker.C:
previousCpuUsage := cpuUsage
cpuUsage.Get()
select {
case samplesCh <- cpuUsage.Delta(previousCpuUsage):
default:
// Include default to avoid channel blocking
}
case <-stopCh:
return
}
}
}()
return samplesCh, stopCh
}
func (c *ConcreteSigar) GetLoadAverage() (LoadAverage, error) {
l := LoadAverage{}
err := l.Get()
return l, err
}
func (c *ConcreteSigar) GetMem() (Mem, error) {
m := Mem{}
err := m.Get()
return m, err
}
func (c *ConcreteSigar) GetSwap() (Swap, error) {
s := Swap{}
err := s.Get()
return s, err
}
func (c *ConcreteSigar) GetHugeTLBPages() (HugeTLBPages, error) {
p := HugeTLBPages{}
err := p.Get()
return p, err
}
func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) {
f := FileSystemUsage{}
err := f.Get(path)
return f, err
}
func (c *ConcreteSigar) GetFDUsage() (FDUsage, error) {
fd := FDUsage{}
err := fd.Get()
return fd, err
}
// GetRusage return the resource usage of the process
// Possible params: 0 = RUSAGE_SELF, 1 = RUSAGE_CHILDREN, 2 = RUSAGE_THREAD
func (c *ConcreteSigar) GetRusage(who int) (Rusage, error) {
r := Rusage{}
err := r.Get(who)
return r, err
}
| vendor/github.com/elastic/gosigar/concrete_sigar.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.002199288457632065,
0.00039646608638577163,
0.00016486849926877767,
0.00016810976376291364,
0.0006374231306836009
] |
{
"id": 2,
"code_window": [
"\tisOnlyAnnounce bool\n",
"}\n",
"\n",
"func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {\n",
"\tid := p.ID()\n",
"\n",
"\treturn &peer{\n",
"\t\tPeer: p,\n",
"\t\trw: rw,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 100
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
)
var (
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
errInvalidHelpTrieReq = errors.New("invalid help trie request")
)
const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
// capacity limitation for parameter updates
const (
allowedUpdateBytes = 100000 // initial/maximum allowed update size
allowedUpdateRate = time.Millisecond * 10 // time constant for recharging one byte of allowance
)
// if the total encoded size of a sent transaction batch is over txSizeCostLimit
// per transaction then the request cost is calculated as proportional to the
// encoded size instead of the transaction count
const txSizeCostLimit = 0x10000
const (
announceTypeNone = iota
announceTypeSimple
announceTypeSigned
)
type peer struct {
*p2p.Peer
rw p2p.MsgReadWriter
version int // Protocol version negotiated
network uint64 // Network ID being on
announceType uint64
id string
headInfo *announceData
lock sync.RWMutex
sendQueue *execQueue
errCh chan error
// responseLock ensures that responses are queued in the same order as
// RequestProcessed is called
responseLock sync.Mutex
responseCount uint64
poolEntry *poolEntry
hasBlock func(common.Hash, uint64, bool) bool
responseErrors int
updateCounter uint64
updateTime mclock.AbsTime
fcClient *flowcontrol.ClientNode // nil if the peer is server only
fcServer *flowcontrol.ServerNode // nil if the peer is client only
fcParams flowcontrol.ServerParams
fcCosts requestCostTable
isTrusted bool
isOnlyAnnounce bool
}
func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
return &peer{
Peer: p,
rw: rw,
version: version,
network: network,
id: fmt.Sprintf("%x", id),
isTrusted: isTrusted,
}
}
// rejectUpdate returns true if a parameter update has to be rejected because
// the size and/or rate of updates exceed the capacity limitation
func (p *peer) rejectUpdate(size uint64) bool {
now := mclock.Now()
if p.updateCounter == 0 {
p.updateTime = now
} else {
dt := now - p.updateTime
r := uint64(dt / mclock.AbsTime(allowedUpdateRate))
if p.updateCounter > r {
p.updateCounter -= r
p.updateTime += mclock.AbsTime(allowedUpdateRate * time.Duration(r))
} else {
p.updateCounter = 0
p.updateTime = now
}
}
p.updateCounter += size
return p.updateCounter > allowedUpdateBytes
}
func (p *peer) canQueue() bool {
return p.sendQueue.canQueue()
}
func (p *peer) queueSend(f func()) {
p.sendQueue.queue(f)
}
// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *eth.PeerInfo {
return ð.PeerInfo{
Version: p.version,
Difficulty: p.Td(),
Head: fmt.Sprintf("%x", p.Head()),
}
}
// Head retrieves a copy of the current head (most recent) hash of the peer.
func (p *peer) Head() (hash common.Hash) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:])
return hash
}
func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:])
return hash, p.headInfo.Td
}
func (p *peer) headBlockInfo() blockInfo {
p.lock.RLock()
defer p.lock.RUnlock()
return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}
}
// Td retrieves the current total difficulty of a peer.
func (p *peer) Td() *big.Int {
p.lock.RLock()
defer p.lock.RUnlock()
return new(big.Int).Set(p.headInfo.Td)
}
// waitBefore implements distPeer interface
func (p *peer) waitBefore(maxCost uint64) (time.Duration, float64) {
return p.fcServer.CanSend(maxCost)
}
// updateCapacity updates the request serving capacity assigned to a given client
// and also sends an announcement about the updated flow control parameters
func (p *peer) updateCapacity(cap uint64) {
p.responseLock.Lock()
defer p.responseLock.Unlock()
p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
p.fcClient.UpdateParams(p.fcParams)
var kvList keyValueList
kvList = kvList.add("flowControl/MRR", cap)
kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
p.queueSend(func() { p.SendAnnounce(announceData{Update: kvList}) })
}
func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error {
type req struct {
ReqID uint64
Data interface{}
}
return p2p.Send(w, msgcode, req{reqID, data})
}
// reply struct represents a reply with the actual data already RLP encoded and
// only the bv (buffer value) missing. This allows the serving mechanism to
// calculate the bv value which depends on the data size before sending the reply.
type reply struct {
w p2p.MsgWriter
msgcode, reqID uint64
data rlp.RawValue
}
// send sends the reply with the calculated buffer value
func (r *reply) send(bv uint64) error {
type resp struct {
ReqID, BV uint64
Data rlp.RawValue
}
return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
}
// size returns the RLP encoded size of the message data
func (r *reply) size() uint32 {
return uint32(len(r.data))
}
func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit
}
return cost
}
func (p *peer) GetTxRelayCost(amount, size int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
var msgcode uint64
switch p.version {
case lpv1:
msgcode = SendTxMsg
case lpv2:
msgcode = SendTxV2Msg
default:
panic(nil)
}
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
if sizeCost > cost {
cost = sizeCost
}
if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit
}
return cost
}
// HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
p.lock.RLock()
hasBlock := p.hasBlock
p.lock.RUnlock()
return hasBlock != nil && hasBlock(hash, number, hasState)
}
// SendAnnounce announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendAnnounce(request announceData) error {
return p2p.Send(p.rw, AnnounceMsg, request)
}
// ReplyBlockHeaders creates a reply with a batch of block headers
func (p *peer) ReplyBlockHeaders(reqID uint64, headers []*types.Header) *reply {
data, _ := rlp.EncodeToBytes(headers)
return &reply{p.rw, BlockHeadersMsg, reqID, data}
}
// ReplyBlockBodiesRLP creates a reply with a batch of block contents from
// an already RLP encoded format.
func (p *peer) ReplyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(bodies)
return &reply{p.rw, BlockBodiesMsg, reqID, data}
}
// ReplyCode creates a reply with a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) ReplyCode(reqID uint64, codes [][]byte) *reply {
data, _ := rlp.EncodeToBytes(codes)
return &reply{p.rw, CodeMsg, reqID, data}
}
// ReplyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(receipts)
return &reply{p.rw, ReceiptsMsg, reqID, data}
}
// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV1Msg, reqID, data}
}
// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data}
}
// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, HeaderProofsMsg, reqID, data}
}
// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
data, _ := rlp.EncodeToBytes(resp)
return &reply{p.rw, HelperTrieProofsMsg, reqID, data}
}
// ReplyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested.
func (p *peer) ReplyTxStatus(reqID uint64, stats []txStatus) *reply {
data, _ := rlp.EncodeToBytes(stats)
return &reply{p.rw, TxStatusMsg, reqID, data}
}
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}
// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []CodeReq) error {
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}
// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
switch p.version {
case lpv1:
return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
case lpv2:
return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
default:
panic(nil)
}
}
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
switch p.version {
case lpv1:
reqs, ok := data.([]ChtReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
case lpv2:
reqs, ok := data.([]HelperTrieReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
default:
panic(nil)
}
}
// RequestTxStatus fetches a batch of transaction status records from a remote node.
func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error {
p.Log().Debug("Requesting transaction status", "count", len(txHashes))
return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes)
}
// SendTxs sends a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "size", len(txs))
switch p.version {
case lpv1:
return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
case lpv2:
return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
default:
panic(nil)
}
}
type keyValueEntry struct {
Key string
Value rlp.RawValue
}
type keyValueList []keyValueEntry
type keyValueMap map[string]rlp.RawValue
func (l keyValueList) add(key string, val interface{}) keyValueList {
var entry keyValueEntry
entry.Key = key
if val == nil {
val = uint64(0)
}
enc, err := rlp.EncodeToBytes(val)
if err == nil {
entry.Value = enc
}
return append(l, entry)
}
func (l keyValueList) decode() (keyValueMap, uint64) {
m := make(keyValueMap)
var size uint64
for _, entry := range l {
m[entry.Key] = entry.Value
size += uint64(len(entry.Key)) + uint64(len(entry.Value)) + 8
}
return m, size
}
func (m keyValueMap) get(key string, val interface{}) error {
enc, ok := m[key]
if !ok {
return errResp(ErrMissingKey, "%s", key)
}
if val == nil {
return nil
}
return rlp.DecodeBytes(enc, val)
}
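// Illustrative sketch (not part of the upstream file): the key/value helpers
// above carry handshake fields as RLP-encoded pairs, roughly used like this:
//
//	var send keyValueList
//	send = send.add("protocolVersion", uint64(lpv2))
//	m, _ := send.decode()            // keyValueMap plus its approximate size
//	var v uint64
//	_ = m.get("protocolVersion", &v) // v now holds lpv2
//
// Handshake below follows this pattern for both the sent and the received list.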
func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) {
// Send our own handshake in a new goroutine
errc := make(chan error, 1)
go func() {
errc <- p2p.Send(p.rw, StatusMsg, sendList)
}()
// In the meantime retrieve the remote status message
msg, err := p.rw.ReadMsg()
if err != nil {
return nil, err
}
if msg.Code != StatusMsg {
return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
if msg.Size > ProtocolMaxMsgSize {
return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
// Decode the handshake
var recvList keyValueList
if err := msg.Decode(&recvList); err != nil {
return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
}
if err := <-errc; err != nil {
return nil, err
}
return recvList, nil
}
// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
p.lock.Lock()
defer p.lock.Unlock()
var send keyValueList
send = send.add("protocolVersion", uint64(p.version))
send = send.add("networkId", p.network)
send = send.add("headTd", td)
send = send.add("headHash", head)
send = send.add("headNum", headNum)
send = send.add("genesisHash", genesis)
if server != nil {
if !server.onlyAnnounce {
// Not an announce-only server: advertise header/chain/state serving and tx relay.
send = send.add("serveHeaders", nil)
send = send.add("serveChainSince", uint64(0))
send = send.add("serveStateSince", uint64(0))
send = send.add("txRelay", nil)
}
send = send.add("flowControl/BL", server.defParams.BufLimit)
send = send.add("flowControl/MRR", server.defParams.MinRecharge)
var costList RequestCostList
if server.costTracker != nil {
costList = server.costTracker.makeCostList()
} else {
costList = testCostList()
}
send = send.add("flowControl/MRC", costList)
p.fcCosts = costList.decode()
p.fcParams = server.defParams
} else {
//on client node
p.announceType = announceTypeSimple
if p.isTrusted {
p.announceType = announceTypeSigned
}
send = send.add("announceType", p.announceType)
}
recvList, err := p.sendReceiveHandshake(send)
if err != nil {
return err
}
recv, size := recvList.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
var rGenesis, rHash common.Hash
var rVersion, rNetwork, rNum uint64
var rTd *big.Int
if err := recv.get("protocolVersion", &rVersion); err != nil {
return err
}
if err := recv.get("networkId", &rNetwork); err != nil {
return err
}
if err := recv.get("headTd", &rTd); err != nil {
return err
}
if err := recv.get("headHash", &rHash); err != nil {
return err
}
if err := recv.get("headNum", &rNum); err != nil {
return err
}
if err := recv.get("genesisHash", &rGenesis); err != nil {
return err
}
if rGenesis != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis[:8], genesis[:8])
}
if rNetwork != p.network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
}
if int(rVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
}
if server != nil {
// until we have a proper peer connectivity API, allow LES connection to other servers
/*if recv.get("serveStateSince", nil) == nil {
return errResp(ErrUselessPeer, "wanted client, got server")
}*/
if recv.get("announceType", &p.announceType) != nil {
//set default announceType on server side
p.announceType = announceTypeSimple
}
p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
} else {
//mark OnlyAnnounce server if "serveHeaders", "serveChainSince", "serveStateSince" or "txRelay" fields don't exist
if recv.get("serveChainSince", nil) != nil {
p.isOnlyAnnounce = true
}
if recv.get("serveStateSince", nil) != nil {
p.isOnlyAnnounce = true
}
if recv.get("txRelay", nil) != nil {
p.isOnlyAnnounce = true
}
if p.isOnlyAnnounce && !p.isTrusted {
return errResp(ErrUselessPeer, "peer cannot serve requests")
}
var params flowcontrol.ServerParams
if err := recv.get("flowControl/BL", ¶ms.BufLimit); err != nil {
return err
}
if err := recv.get("flowControl/MRR", ¶ms.MinRecharge); err != nil {
return err
}
var MRC RequestCostList
if err := recv.get("flowControl/MRC", &MRC); err != nil {
return err
}
p.fcParams = params
p.fcServer = flowcontrol.NewServerNode(params, &mclock.System{})
p.fcCosts = MRC.decode()
}
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
return nil
}
// updateFlowControl updates the flow control parameters belonging to the server
// node if the announced key/value set contains relevant fields
func (p *peer) updateFlowControl(update keyValueMap) {
if p.fcServer == nil {
return
}
params := p.fcParams
updateParams := false
if update.get("flowControl/BL", ¶ms.BufLimit) == nil {
updateParams = true
}
if update.get("flowControl/MRR", ¶ms.MinRecharge) == nil {
updateParams = true
}
if updateParams {
p.fcParams = params
p.fcServer.UpdateParams(params)
}
var MRC RequestCostList
if update.get("flowControl/MRC", &MRC) == nil {
p.fcCosts = MRC.decode()
}
}
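// Illustrative note (not part of the upstream file): an announcement carrying
// "flowControl/BL" and/or "flowControl/MRR" updates the local ServerParams and is
// forwarded to fcServer, while a "flowControl/MRC" entry replaces the per-message
// cost table; any unrelated keys in the same update are simply ignored here.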
// String implements fmt.Stringer.
func (p *peer) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("les/%d", p.version),
)
}
// peerSetNotify is a callback interface to notify services about added or
// removed peers
type peerSetNotify interface {
registerPeer(*peer)
unregisterPeer(*peer)
}
// peerSet represents the collection of active peers currently participating in
// the Light Ethereum sub-protocol.
type peerSet struct {
peers map[string]*peer
lock sync.RWMutex
notifyList []peerSetNotify
closed bool
}
// newPeerSet creates a new peer set to track the active participants.
func newPeerSet() *peerSet {
return &peerSet{
peers: make(map[string]*peer),
}
}
// notify adds a service to be notified about added or removed peers
func (ps *peerSet) notify(n peerSetNotify) {
ps.lock.Lock()
ps.notifyList = append(ps.notifyList, n)
peers := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
peers = append(peers, p)
}
ps.lock.Unlock()
for _, p := range peers {
n.registerPeer(p)
}
}
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
if ps.closed {
ps.lock.Unlock()
return errClosed
}
if _, ok := ps.peers[p.id]; ok {
ps.lock.Unlock()
return errAlreadyRegistered
}
ps.peers[p.id] = p
p.sendQueue = newExecQueue(100)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.registerPeer(p)
}
return nil
}
// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity. It also initiates disconnection at the networking layer.
func (ps *peerSet) Unregister(id string) error {
ps.lock.Lock()
if p, ok := ps.peers[id]; !ok {
ps.lock.Unlock()
return errNotRegistered
} else {
delete(ps.peers, id)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.unregisterPeer(p)
}
p.sendQueue.quit()
p.Peer.Disconnect(p2p.DiscUselessPeer)
return nil
}
}
// AllPeerIDs returns a list of all registered peer IDs
func (ps *peerSet) AllPeerIDs() []string {
ps.lock.RLock()
defer ps.lock.RUnlock()
res := make([]string, len(ps.peers))
idx := 0
for id := range ps.peers {
res[idx] = id
idx++
}
return res
}
// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
return ps.peers[id]
}
// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
ps.lock.RLock()
defer ps.lock.RUnlock()
return len(ps.peers)
}
// BestPeer retrieves the known peer with the currently highest total difficulty.
func (ps *peerSet) BestPeer() *peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
var (
bestPeer *peer
bestTd *big.Int
)
for _, p := range ps.peers {
if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
bestPeer, bestTd = p, td
}
}
return bestPeer
}
// AllPeers returns all peers in a list
func (ps *peerSet) AllPeers() []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, len(ps.peers))
i := 0
for _, peer := range ps.peers {
list[i] = peer
i++
}
return list
}
// Close disconnects all peers.
// No new peers can be registered after Close has returned.
func (ps *peerSet) Close() {
ps.lock.Lock()
defer ps.lock.Unlock()
for _, p := range ps.peers {
p.Disconnect(p2p.DiscQuitting)
}
ps.closed = true
}
| les/peer.go | 1 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.9982485771179199,
0.20716065168380737,
0.0001690501085249707,
0.022144798189401627,
0.34438085556030273
] |
{
"id": 2,
"code_window": [
"\tisOnlyAnnounce bool\n",
"}\n",
"\n",
"func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {\n",
"\tid := p.ID()\n",
"\n",
"\treturn &peer{\n",
"\t\tPeer: p,\n",
"\t\trw: rw,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 100
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package browser provides utilities for interacting with users' browsers.
package browser
import (
"os"
"os/exec"
"runtime"
)
// Commands returns a list of possible commands to use to open a url.
func Commands() [][]string {
var cmds [][]string
if exe := os.Getenv("BROWSER"); exe != "" {
cmds = append(cmds, []string{exe})
}
switch runtime.GOOS {
case "darwin":
cmds = append(cmds, []string{"/usr/bin/open"})
case "windows":
cmds = append(cmds, []string{"cmd", "/c", "start"})
default:
cmds = append(cmds, []string{"xdg-open"})
}
cmds = append(cmds,
[]string{"chrome"},
[]string{"google-chrome"},
[]string{"chromium"},
[]string{"firefox"},
)
return cmds
}
// Open tries to open url in a browser and reports whether it succeeded.
func Open(url string) bool {
for _, args := range Commands() {
cmd := exec.Command(args[0], append(args[1:], url)...)
if cmd.Start() == nil {
return true
}
}
return false
}
| cmd/internal/browser/browser.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00018229168199468404,
0.00017311629198957235,
0.00016872721607796848,
0.0001692848018137738,
0.000005372844952944433
] |
{
"id": 2,
"code_window": [
"\tisOnlyAnnounce bool\n",
"}\n",
"\n",
"func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {\n",
"\tid := p.ID()\n",
"\n",
"\treturn &peer{\n",
"\t\tPeer: p,\n",
"\t\trw: rw,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 100
} | package check
import (
"fmt"
"reflect"
"regexp"
)
// -----------------------------------------------------------------------
// CommentInterface and Commentf helper, to attach extra information to checks.
type comment struct {
format string
args []interface{}
}
// Commentf returns an informational value to use with Assert or Check calls.
// If the checker test fails, the provided arguments will be passed to
// fmt.Sprintf, and will be presented next to the logged failure.
//
// For example:
//
// c.Assert(v, Equals, 42, Commentf("Iteration #%d failed.", i))
//
// Note that if the comment is constant, a better option is to
// simply use a normal comment right above or next to the line, as
// it will also get printed with any errors:
//
// c.Assert(l, Equals, 8192) // Ensure buffer size is correct (bug #123)
//
func Commentf(format string, args ...interface{}) CommentInterface {
return &comment{format, args}
}
// CommentInterface must be implemented by types that attach extra
// information to failed checks. See the Commentf function for details.
type CommentInterface interface {
CheckCommentString() string
}
func (c *comment) CheckCommentString() string {
return fmt.Sprintf(c.format, c.args...)
}
// -----------------------------------------------------------------------
// The Checker interface.
// The Checker interface must be provided by checkers used with
// the Assert and Check verification methods.
type Checker interface {
Info() *CheckerInfo
Check(params []interface{}, names []string) (result bool, error string)
}
// See the Checker interface.
type CheckerInfo struct {
Name string
Params []string
}
func (info *CheckerInfo) Info() *CheckerInfo {
return info
}
// -----------------------------------------------------------------------
// Not checker logic inverter.
// The Not checker inverts the logic of the provided checker. The
// resulting checker will succeed where the original one failed, and
// vice-versa.
//
// For example:
//
// c.Assert(a, Not(Equals), b)
//
func Not(checker Checker) Checker {
return &notChecker{checker}
}
type notChecker struct {
sub Checker
}
func (checker *notChecker) Info() *CheckerInfo {
info := *checker.sub.Info()
info.Name = "Not(" + info.Name + ")"
return &info
}
func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
result, error = checker.sub.Check(params, names)
result = !result
return
}
// -----------------------------------------------------------------------
// IsNil checker.
type isNilChecker struct {
*CheckerInfo
}
// The IsNil checker tests whether the obtained value is nil.
//
// For example:
//
// c.Assert(err, IsNil)
//
var IsNil Checker = &isNilChecker{
&CheckerInfo{Name: "IsNil", Params: []string{"value"}},
}
func (checker *isNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
return isNil(params[0]), ""
}
func isNil(obtained interface{}) (result bool) {
if obtained == nil {
result = true
} else {
switch v := reflect.ValueOf(obtained); v.Kind() {
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
}
return
}
// -----------------------------------------------------------------------
// NotNil checker. Alias for Not(IsNil), since it's so common.
type notNilChecker struct {
*CheckerInfo
}
// The NotNil checker verifies that the obtained value is not nil.
//
// For example:
//
// c.Assert(iface, NotNil)
//
// This is an alias for Not(IsNil), made available since it's a
// fairly common check.
//
var NotNil Checker = &notNilChecker{
&CheckerInfo{Name: "NotNil", Params: []string{"value"}},
}
func (checker *notNilChecker) Check(params []interface{}, names []string) (result bool, error string) {
return !isNil(params[0]), ""
}
// -----------------------------------------------------------------------
// Equals checker.
type equalsChecker struct {
*CheckerInfo
}
// The Equals checker verifies that the obtained value is equal to
// the expected value, according to usual Go semantics for ==.
//
// For example:
//
// c.Assert(value, Equals, 42)
//
var Equals Checker = &equalsChecker{
&CheckerInfo{Name: "Equals", Params: []string{"obtained", "expected"}},
}
func (checker *equalsChecker) Check(params []interface{}, names []string) (result bool, error string) {
defer func() {
if v := recover(); v != nil {
result = false
error = fmt.Sprint(v)
}
}()
return params[0] == params[1], ""
}
// -----------------------------------------------------------------------
// DeepEquals checker.
type deepEqualsChecker struct {
*CheckerInfo
}
// The DeepEquals checker verifies that the obtained value is deep-equal to
// the expected value. The check will work correctly even when facing
// slices, interfaces, and values of different types (which always fail
// the test).
//
// For example:
//
// c.Assert(value, DeepEquals, 42)
// c.Assert(array, DeepEquals, []string{"hi", "there"})
//
var DeepEquals Checker = &deepEqualsChecker{
&CheckerInfo{Name: "DeepEquals", Params: []string{"obtained", "expected"}},
}
func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
return reflect.DeepEqual(params[0], params[1]), ""
}
// -----------------------------------------------------------------------
// HasLen checker.
type hasLenChecker struct {
*CheckerInfo
}
// The HasLen checker verifies that the obtained value has the
// provided length. In many cases this is superior to using Equals
// in conjunction with the len function because in case the check
// fails the value itself will be printed, instead of its length,
// providing more details for figuring out the problem.
//
// For example:
//
// c.Assert(list, HasLen, 5)
//
var HasLen Checker = &hasLenChecker{
&CheckerInfo{Name: "HasLen", Params: []string{"obtained", "n"}},
}
func (checker *hasLenChecker) Check(params []interface{}, names []string) (result bool, error string) {
n, ok := params[1].(int)
if !ok {
return false, "n must be an int"
}
value := reflect.ValueOf(params[0])
switch value.Kind() {
case reflect.Map, reflect.Array, reflect.Slice, reflect.Chan, reflect.String:
default:
return false, "obtained value type has no length"
}
return value.Len() == n, ""
}
// -----------------------------------------------------------------------
// ErrorMatches checker.
type errorMatchesChecker struct {
*CheckerInfo
}
// The ErrorMatches checker verifies that the error value
// is non nil and matches the regular expression provided.
//
// For example:
//
// c.Assert(err, ErrorMatches, "perm.*denied")
//
var ErrorMatches Checker = errorMatchesChecker{
&CheckerInfo{Name: "ErrorMatches", Params: []string{"value", "regex"}},
}
func (checker errorMatchesChecker) Check(params []interface{}, names []string) (result bool, errStr string) {
if params[0] == nil {
return false, "Error value is nil"
}
err, ok := params[0].(error)
if !ok {
return false, "Value is not an error"
}
params[0] = err.Error()
names[0] = "error"
return matches(params[0], params[1])
}
// -----------------------------------------------------------------------
// Matches checker.
type matchesChecker struct {
*CheckerInfo
}
// The Matches checker verifies that the string provided as the obtained
// value (or the string resulting from obtained.String()) matches the
// regular expression provided.
//
// For example:
//
// c.Assert(err, Matches, "perm.*denied")
//
var Matches Checker = &matchesChecker{
&CheckerInfo{Name: "Matches", Params: []string{"value", "regex"}},
}
func (checker *matchesChecker) Check(params []interface{}, names []string) (result bool, error string) {
return matches(params[0], params[1])
}
func matches(value, regex interface{}) (result bool, error string) {
reStr, ok := regex.(string)
if !ok {
return false, "Regex must be a string"
}
valueStr, valueIsStr := value.(string)
if !valueIsStr {
if valueWithStr, valueHasStr := value.(fmt.Stringer); valueHasStr {
valueStr, valueIsStr = valueWithStr.String(), true
}
}
if valueIsStr {
matches, err := regexp.MatchString("^"+reStr+"$", valueStr)
if err != nil {
return false, "Can't compile regex: " + err.Error()
}
return matches, ""
}
return false, "Obtained value is not a string and has no .String()"
}
// -----------------------------------------------------------------------
// Panics checker.
type panicsChecker struct {
*CheckerInfo
}
// The Panics checker verifies that calling the provided zero-argument
// function will cause a panic which is deep-equal to the provided value.
//
// For example:
//
// c.Assert(func() { f(1, 2) }, Panics, &SomeErrorType{"BOOM"}).
//
//
var Panics Checker = &panicsChecker{
&CheckerInfo{Name: "Panics", Params: []string{"function", "expected"}},
}
func (checker *panicsChecker) Check(params []interface{}, names []string) (result bool, error string) {
f := reflect.ValueOf(params[0])
if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
return false, "Function must take zero arguments"
}
defer func() {
// If the function has not panicked, then don't do the check.
if error != "" {
return
}
params[0] = recover()
names[0] = "panic"
result = reflect.DeepEqual(params[0], params[1])
}()
f.Call(nil)
return false, "Function has not panicked"
}
type panicMatchesChecker struct {
*CheckerInfo
}
// The PanicMatches checker verifies that calling the provided zero-argument
// function will cause a panic with an error value matching
// the regular expression provided.
//
// For example:
//
// c.Assert(func() { f(1, 2) }, PanicMatches, `open.*: no such file or directory`).
//
//
var PanicMatches Checker = &panicMatchesChecker{
&CheckerInfo{Name: "PanicMatches", Params: []string{"function", "expected"}},
}
func (checker *panicMatchesChecker) Check(params []interface{}, names []string) (result bool, errmsg string) {
f := reflect.ValueOf(params[0])
if f.Kind() != reflect.Func || f.Type().NumIn() != 0 {
return false, "Function must take zero arguments"
}
defer func() {
// If the function has not panicked, then don't do the check.
if errmsg != "" {
return
}
obtained := recover()
names[0] = "panic"
if e, ok := obtained.(error); ok {
params[0] = e.Error()
} else if _, ok := obtained.(string); ok {
params[0] = obtained
} else {
errmsg = "Panic value is not a string or an error"
return
}
result, errmsg = matches(params[0], params[1])
}()
f.Call(nil)
return false, "Function has not panicked"
}
// -----------------------------------------------------------------------
// FitsTypeOf checker.
type fitsTypeChecker struct {
*CheckerInfo
}
// The FitsTypeOf checker verifies that the obtained value is
// assignable to a variable with the same type as the provided
// sample value.
//
// For example:
//
// c.Assert(value, FitsTypeOf, int64(0))
// c.Assert(value, FitsTypeOf, os.Error(nil))
//
var FitsTypeOf Checker = &fitsTypeChecker{
&CheckerInfo{Name: "FitsTypeOf", Params: []string{"obtained", "sample"}},
}
func (checker *fitsTypeChecker) Check(params []interface{}, names []string) (result bool, error string) {
obtained := reflect.ValueOf(params[0])
sample := reflect.ValueOf(params[1])
if !obtained.IsValid() {
return false, ""
}
if !sample.IsValid() {
return false, "Invalid sample value"
}
return obtained.Type().AssignableTo(sample.Type()), ""
}
// -----------------------------------------------------------------------
// Implements checker.
type implementsChecker struct {
*CheckerInfo
}
// The Implements checker verifies that the obtained value
// implements the interface specified via a pointer to an interface
// variable.
//
// For example:
//
// var e os.Error
// c.Assert(err, Implements, &e)
//
var Implements Checker = &implementsChecker{
&CheckerInfo{Name: "Implements", Params: []string{"obtained", "ifaceptr"}},
}
func (checker *implementsChecker) Check(params []interface{}, names []string) (result bool, error string) {
obtained := reflect.ValueOf(params[0])
ifaceptr := reflect.ValueOf(params[1])
if !obtained.IsValid() {
return false, ""
}
if !ifaceptr.IsValid() || ifaceptr.Kind() != reflect.Ptr || ifaceptr.Elem().Kind() != reflect.Interface {
return false, "ifaceptr should be a pointer to an interface variable"
}
return obtained.Type().Implements(ifaceptr.Elem().Type()), ""
}
| vendor/gopkg.in/check.v1/checkers.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.0012267931597307324,
0.0003182310319971293,
0.00016670410695951432,
0.00022758993145544082,
0.00023559766123071313
] |
{
"id": 2,
"code_window": [
"\tisOnlyAnnounce bool\n",
"}\n",
"\n",
"func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {\n",
"\tid := p.ID()\n",
"\n",
"\treturn &peer{\n",
"\t\tPeer: p,\n",
"\t\trw: rw,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 100
} | // This test checks that an error response is sent for calls
// with named parameters.
--> {"jsonrpc":"2.0","method":"test_echo","params":{"int":23},"id":3}
<-- {"jsonrpc":"2.0","id":3,"error":{"code":-32602,"message":"non-array args"}}
| rpc/testdata/reqresp-namedparam.js | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.0001681820722296834,
0.0001681820722296834,
0.0001681820722296834,
0.0001681820722296834,
0
] |
{
"id": 3,
"code_window": [
"\t\trw: rw,\n",
"\t\tversion: version,\n",
"\t\tnetwork: network,\n",
"\t\tid: fmt.Sprintf(\"%x\", id),\n",
"\t\tisTrusted: isTrusted,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\tid: fmt.Sprintf(\"%x\", p.ID().Bytes()),\n"
],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 107
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"errors"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/rlp"
)
var (
errClosed = errors.New("peer set is closed")
errAlreadyRegistered = errors.New("peer is already registered")
errNotRegistered = errors.New("peer is not registered")
errInvalidHelpTrieReq = errors.New("invalid help trie request")
)
const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
// capacity limitation for parameter updates
const (
allowedUpdateBytes = 100000 // initial/maximum allowed update size
allowedUpdateRate = time.Millisecond * 10 // time constant for recharging one byte of allowance
)
// if the total encoded size of a sent transaction batch is over txSizeCostLimit
// per transaction then the request cost is calculated as proportional to the
// encoded size instead of the transaction count
const txSizeCostLimit = 0x10000
const (
announceTypeNone = iota
announceTypeSimple
announceTypeSigned
)
type peer struct {
*p2p.Peer
rw p2p.MsgReadWriter
version int // Protocol version negotiated
network uint64 // Network ID being on
announceType uint64
id string
headInfo *announceData
lock sync.RWMutex
sendQueue *execQueue
errCh chan error
// responseLock ensures that responses are queued in the same order as
// RequestProcessed is called
responseLock sync.Mutex
responseCount uint64
poolEntry *poolEntry
hasBlock func(common.Hash, uint64, bool) bool
responseErrors int
updateCounter uint64
updateTime mclock.AbsTime
fcClient *flowcontrol.ClientNode // nil if the peer is server only
fcServer *flowcontrol.ServerNode // nil if the peer is client only
fcParams flowcontrol.ServerParams
fcCosts requestCostTable
isTrusted bool
isOnlyAnnounce bool
}
func newPeer(version int, network uint64, isTrusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
id := p.ID()
return &peer{
Peer: p,
rw: rw,
version: version,
network: network,
id: fmt.Sprintf("%x", id),
isTrusted: isTrusted,
}
}
// rejectUpdate returns true if a parameter update has to be rejected because
// the size and/or rate of updates exceed the capacity limitation
func (p *peer) rejectUpdate(size uint64) bool {
now := mclock.Now()
if p.updateCounter == 0 {
p.updateTime = now
} else {
dt := now - p.updateTime
r := uint64(dt / mclock.AbsTime(allowedUpdateRate))
if p.updateCounter > r {
p.updateCounter -= r
p.updateTime += mclock.AbsTime(allowedUpdateRate * time.Duration(r))
} else {
p.updateCounter = 0
p.updateTime = now
}
}
p.updateCounter += size
return p.updateCounter > allowedUpdateBytes
}
func (p *peer) canQueue() bool {
return p.sendQueue.canQueue()
}
func (p *peer) queueSend(f func()) {
p.sendQueue.queue(f)
}
// Info gathers and returns a collection of metadata known about a peer.
func (p *peer) Info() *eth.PeerInfo {
return &eth.PeerInfo{
Version: p.version,
Difficulty: p.Td(),
Head: fmt.Sprintf("%x", p.Head()),
}
}
// Head retrieves a copy of the current head (most recent) hash of the peer.
func (p *peer) Head() (hash common.Hash) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:])
return hash
}
func (p *peer) HeadAndTd() (hash common.Hash, td *big.Int) {
p.lock.RLock()
defer p.lock.RUnlock()
copy(hash[:], p.headInfo.Hash[:])
return hash, p.headInfo.Td
}
func (p *peer) headBlockInfo() blockInfo {
p.lock.RLock()
defer p.lock.RUnlock()
return blockInfo{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td}
}
// Td retrieves the current total difficulty of a peer.
func (p *peer) Td() *big.Int {
p.lock.RLock()
defer p.lock.RUnlock()
return new(big.Int).Set(p.headInfo.Td)
}
// waitBefore implements distPeer interface
func (p *peer) waitBefore(maxCost uint64) (time.Duration, float64) {
return p.fcServer.CanSend(maxCost)
}
// updateCapacity updates the request serving capacity assigned to a given client
// and also sends an announcement about the updated flow control parameters
func (p *peer) updateCapacity(cap uint64) {
p.responseLock.Lock()
defer p.responseLock.Unlock()
p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}
p.fcClient.UpdateParams(p.fcParams)
var kvList keyValueList
kvList = kvList.add("flowControl/MRR", cap)
kvList = kvList.add("flowControl/BL", cap*bufLimitRatio)
p.queueSend(func() { p.SendAnnounce(announceData{Update: kvList}) })
}
func sendRequest(w p2p.MsgWriter, msgcode, reqID, cost uint64, data interface{}) error {
type req struct {
ReqID uint64
Data interface{}
}
return p2p.Send(w, msgcode, req{reqID, data})
}
// reply struct represents a reply with the actual data already RLP encoded and
// only the bv (buffer value) missing. This allows the serving mechanism to
// calculate the bv value which depends on the data size before sending the reply.
type reply struct {
w p2p.MsgWriter
msgcode, reqID uint64
data rlp.RawValue
}
// send sends the reply with the calculated buffer value
func (r *reply) send(bv uint64) error {
type resp struct {
ReqID, BV uint64
Data rlp.RawValue
}
return p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})
}
// size returns the RLP encoded size of the message data
func (r *reply) size() uint32 {
return uint32(len(r.data))
}
func (p *peer) GetRequestCost(msgcode uint64, amount int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit
}
return cost
}
func (p *peer) GetTxRelayCost(amount, size int) uint64 {
p.lock.RLock()
defer p.lock.RUnlock()
var msgcode uint64
switch p.version {
case lpv1:
msgcode = SendTxMsg
case lpv2:
msgcode = SendTxV2Msg
default:
panic(nil)
}
cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
if sizeCost > cost {
cost = sizeCost
}
if cost > p.fcParams.BufLimit {
cost = p.fcParams.BufLimit
}
return cost
}
// HasBlock checks if the peer has a given block
func (p *peer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {
p.lock.RLock()
hasBlock := p.hasBlock
p.lock.RUnlock()
return hasBlock != nil && hasBlock(hash, number, hasState)
}
// SendAnnounce announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendAnnounce(request announceData) error {
return p2p.Send(p.rw, AnnounceMsg, request)
}
// ReplyBlockHeaders creates a reply with a batch of block headers
func (p *peer) ReplyBlockHeaders(reqID uint64, headers []*types.Header) *reply {
data, _ := rlp.EncodeToBytes(headers)
return &reply{p.rw, BlockHeadersMsg, reqID, data}
}
// ReplyBlockBodiesRLP creates a reply with a batch of block contents from
// an already RLP encoded format.
func (p *peer) ReplyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(bodies)
return &reply{p.rw, BlockBodiesMsg, reqID, data}
}
// ReplyCode creates a reply with a batch of arbitrary internal data, corresponding to the
// hashes requested.
func (p *peer) ReplyCode(reqID uint64, codes [][]byte) *reply {
data, _ := rlp.EncodeToBytes(codes)
return &reply{p.rw, CodeMsg, reqID, data}
}
// ReplyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the
// ones requested from an already RLP encoded format.
func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
data, _ := rlp.EncodeToBytes(receipts)
return &reply{p.rw, ReceiptsMsg, reqID, data}
}
// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV1Msg, reqID, data}
}
// ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, ProofsV2Msg, reqID, data}
}
// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
data, _ := rlp.EncodeToBytes(proofs)
return &reply{p.rw, HeaderProofsMsg, reqID, data}
}
// ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
data, _ := rlp.EncodeToBytes(resp)
return &reply{p.rw, HelperTrieProofsMsg, reqID, data}
}
// ReplyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested.
func (p *peer) ReplyTxStatus(reqID uint64, stats []txStatus) *reply {
data, _ := rlp.EncodeToBytes(stats)
return &reply{p.rw, TxStatusMsg, reqID, data}
}
// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the
// specified header query, based on the hash of an origin block.
func (p *peer) RequestHeadersByHash(reqID, cost uint64, origin common.Hash, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the
// specified header query, based on the number of an origin block.
func (p *peer) RequestHeadersByNumber(reqID, cost, origin uint64, amount int, skip int, reverse bool) error {
p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse)
return sendRequest(p.rw, GetBlockHeadersMsg, reqID, cost, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse})
}
// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes
// specified.
func (p *peer) RequestBodies(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of block bodies", "count", len(hashes))
return sendRequest(p.rw, GetBlockBodiesMsg, reqID, cost, hashes)
}
// RequestCode fetches a batch of arbitrary data from a node's known state
// data, corresponding to the specified hashes.
func (p *peer) RequestCode(reqID, cost uint64, reqs []CodeReq) error {
p.Log().Debug("Fetching batch of codes", "count", len(reqs))
return sendRequest(p.rw, GetCodeMsg, reqID, cost, reqs)
}
// RequestReceipts fetches a batch of transaction receipts from a remote node.
func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
p.Log().Debug("Fetching batch of receipts", "count", len(hashes))
return sendRequest(p.rw, GetReceiptsMsg, reqID, cost, hashes)
}
// RequestProofs fetches a batch of merkle proofs from a remote node.
func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
switch p.version {
case lpv1:
return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
case lpv2:
return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
default:
panic(nil)
}
}
// RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
switch p.version {
case lpv1:
reqs, ok := data.([]ChtReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
case lpv2:
reqs, ok := data.([]HelperTrieReq)
if !ok {
return errInvalidHelpTrieReq
}
p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
default:
panic(nil)
}
}
// RequestTxStatus fetches a batch of transaction status records from a remote node.
func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error {
p.Log().Debug("Requesting transaction status", "count", len(txHashes))
return sendRequest(p.rw, GetTxStatusMsg, reqID, cost, txHashes)
}
// SendTxs sends a batch of transactions to be added to the remote transaction pool.
func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
p.Log().Debug("Sending batch of transactions", "size", len(txs))
switch p.version {
case lpv1:
return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
case lpv2:
return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
default:
panic(nil)
}
}
type keyValueEntry struct {
Key string
Value rlp.RawValue
}
type keyValueList []keyValueEntry
type keyValueMap map[string]rlp.RawValue
func (l keyValueList) add(key string, val interface{}) keyValueList {
var entry keyValueEntry
entry.Key = key
if val == nil {
val = uint64(0)
}
enc, err := rlp.EncodeToBytes(val)
if err == nil {
entry.Value = enc
}
return append(l, entry)
}
func (l keyValueList) decode() (keyValueMap, uint64) {
m := make(keyValueMap)
var size uint64
for _, entry := range l {
m[entry.Key] = entry.Value
size += uint64(len(entry.Key)) + uint64(len(entry.Value)) + 8
}
return m, size
}
func (m keyValueMap) get(key string, val interface{}) error {
enc, ok := m[key]
if !ok {
return errResp(ErrMissingKey, "%s", key)
}
if val == nil {
return nil
}
return rlp.DecodeBytes(enc, val)
}
func (p *peer) sendReceiveHandshake(sendList keyValueList) (keyValueList, error) {
// Send our own handshake in a new goroutine
errc := make(chan error, 1)
go func() {
errc <- p2p.Send(p.rw, StatusMsg, sendList)
}()
// In the meantime retrieve the remote status message
msg, err := p.rw.ReadMsg()
if err != nil {
return nil, err
}
if msg.Code != StatusMsg {
return nil, errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg)
}
if msg.Size > ProtocolMaxMsgSize {
return nil, errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
// Decode the handshake
var recvList keyValueList
if err := msg.Decode(&recvList); err != nil {
return nil, errResp(ErrDecode, "msg %v: %v", msg, err)
}
if err := <-errc; err != nil {
return nil, err
}
return recvList, nil
}
// Handshake executes the les protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error {
p.lock.Lock()
defer p.lock.Unlock()
var send keyValueList
send = send.add("protocolVersion", uint64(p.version))
send = send.add("networkId", p.network)
send = send.add("headTd", td)
send = send.add("headHash", head)
send = send.add("headNum", headNum)
send = send.add("genesisHash", genesis)
if server != nil {
if !server.onlyAnnounce {
// Not an announce-only server: advertise header/chain/state serving and tx relay.
send = send.add("serveHeaders", nil)
send = send.add("serveChainSince", uint64(0))
send = send.add("serveStateSince", uint64(0))
send = send.add("txRelay", nil)
}
send = send.add("flowControl/BL", server.defParams.BufLimit)
send = send.add("flowControl/MRR", server.defParams.MinRecharge)
var costList RequestCostList
if server.costTracker != nil {
costList = server.costTracker.makeCostList()
} else {
costList = testCostList()
}
send = send.add("flowControl/MRC", costList)
p.fcCosts = costList.decode()
p.fcParams = server.defParams
} else {
//on client node
p.announceType = announceTypeSimple
if p.isTrusted {
p.announceType = announceTypeSigned
}
send = send.add("announceType", p.announceType)
}
recvList, err := p.sendReceiveHandshake(send)
if err != nil {
return err
}
recv, size := recvList.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
var rGenesis, rHash common.Hash
var rVersion, rNetwork, rNum uint64
var rTd *big.Int
if err := recv.get("protocolVersion", &rVersion); err != nil {
return err
}
if err := recv.get("networkId", &rNetwork); err != nil {
return err
}
if err := recv.get("headTd", &rTd); err != nil {
return err
}
if err := recv.get("headHash", &rHash); err != nil {
return err
}
if err := recv.get("headNum", &rNum); err != nil {
return err
}
if err := recv.get("genesisHash", &rGenesis); err != nil {
return err
}
if rGenesis != genesis {
return errResp(ErrGenesisBlockMismatch, "%x (!= %x)", rGenesis[:8], genesis[:8])
}
if rNetwork != p.network {
return errResp(ErrNetworkIdMismatch, "%d (!= %d)", rNetwork, p.network)
}
if int(rVersion) != p.version {
return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version)
}
if server != nil {
// until we have a proper peer connectivity API, allow LES connection to other servers
/*if recv.get("serveStateSince", nil) == nil {
return errResp(ErrUselessPeer, "wanted client, got server")
}*/
if recv.get("announceType", &p.announceType) != nil {
//set default announceType on server side
p.announceType = announceTypeSimple
}
p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
} else {
//mark OnlyAnnounce server if "serveHeaders", "serveChainSince", "serveStateSince" or "txRelay" fields don't exist
if recv.get("serveChainSince", nil) != nil {
p.isOnlyAnnounce = true
}
if recv.get("serveStateSince", nil) != nil {
p.isOnlyAnnounce = true
}
if recv.get("txRelay", nil) != nil {
p.isOnlyAnnounce = true
}
if p.isOnlyAnnounce && !p.isTrusted {
return errResp(ErrUselessPeer, "peer cannot serve requests")
}
var params flowcontrol.ServerParams
if err := recv.get("flowControl/BL", ¶ms.BufLimit); err != nil {
return err
}
if err := recv.get("flowControl/MRR", ¶ms.MinRecharge); err != nil {
return err
}
var MRC RequestCostList
if err := recv.get("flowControl/MRC", &MRC); err != nil {
return err
}
p.fcParams = params
p.fcServer = flowcontrol.NewServerNode(params, &mclock.System{})
p.fcCosts = MRC.decode()
}
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
return nil
}
// updateFlowControl updates the flow control parameters belonging to the server
// node if the announced key/value set contains relevant fields
func (p *peer) updateFlowControl(update keyValueMap) {
if p.fcServer == nil {
return
}
params := p.fcParams
updateParams := false
if update.get("flowControl/BL", ¶ms.BufLimit) == nil {
updateParams = true
}
if update.get("flowControl/MRR", ¶ms.MinRecharge) == nil {
updateParams = true
}
if updateParams {
p.fcParams = params
p.fcServer.UpdateParams(params)
}
var MRC RequestCostList
if update.get("flowControl/MRC", &MRC) == nil {
p.fcCosts = MRC.decode()
}
}
// String implements fmt.Stringer.
func (p *peer) String() string {
return fmt.Sprintf("Peer %s [%s]", p.id,
fmt.Sprintf("les/%d", p.version),
)
}
// peerSetNotify is a callback interface to notify services about added or
// removed peers
type peerSetNotify interface {
registerPeer(*peer)
unregisterPeer(*peer)
}
// peerSet represents the collection of active peers currently participating in
// the Light Ethereum sub-protocol.
type peerSet struct {
peers map[string]*peer
lock sync.RWMutex
notifyList []peerSetNotify
closed bool
}
// newPeerSet creates a new peer set to track the active participants.
func newPeerSet() *peerSet {
return &peerSet{
peers: make(map[string]*peer),
}
}
// notify adds a service to be notified about added or removed peers
func (ps *peerSet) notify(n peerSetNotify) {
ps.lock.Lock()
ps.notifyList = append(ps.notifyList, n)
peers := make([]*peer, 0, len(ps.peers))
for _, p := range ps.peers {
peers = append(peers, p)
}
ps.lock.Unlock()
for _, p := range peers {
n.registerPeer(p)
}
}
// Register injects a new peer into the working set, or returns an error if the
// peer is already known.
func (ps *peerSet) Register(p *peer) error {
ps.lock.Lock()
if ps.closed {
ps.lock.Unlock()
return errClosed
}
if _, ok := ps.peers[p.id]; ok {
ps.lock.Unlock()
return errAlreadyRegistered
}
ps.peers[p.id] = p
p.sendQueue = newExecQueue(100)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.registerPeer(p)
}
return nil
}
// Unregister removes a remote peer from the active set, disabling any further
// actions to/from that particular entity. It also initiates disconnection at the networking layer.
func (ps *peerSet) Unregister(id string) error {
ps.lock.Lock()
if p, ok := ps.peers[id]; !ok {
ps.lock.Unlock()
return errNotRegistered
} else {
delete(ps.peers, id)
peers := make([]peerSetNotify, len(ps.notifyList))
copy(peers, ps.notifyList)
ps.lock.Unlock()
for _, n := range peers {
n.unregisterPeer(p)
}
p.sendQueue.quit()
p.Peer.Disconnect(p2p.DiscUselessPeer)
return nil
}
}
// AllPeerIDs returns a list of all registered peer IDs
func (ps *peerSet) AllPeerIDs() []string {
ps.lock.RLock()
defer ps.lock.RUnlock()
res := make([]string, len(ps.peers))
idx := 0
for id := range ps.peers {
res[idx] = id
idx++
}
return res
}
// Peer retrieves the registered peer with the given id.
func (ps *peerSet) Peer(id string) *peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
return ps.peers[id]
}
// Len returns the current number of peers in the set.
func (ps *peerSet) Len() int {
ps.lock.RLock()
defer ps.lock.RUnlock()
return len(ps.peers)
}
// BestPeer retrieves the known peer with the currently highest total difficulty.
func (ps *peerSet) BestPeer() *peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
var (
bestPeer *peer
bestTd *big.Int
)
for _, p := range ps.peers {
if td := p.Td(); bestPeer == nil || td.Cmp(bestTd) > 0 {
bestPeer, bestTd = p, td
}
}
return bestPeer
}
// AllPeers returns all peers in a list
func (ps *peerSet) AllPeers() []*peer {
ps.lock.RLock()
defer ps.lock.RUnlock()
list := make([]*peer, len(ps.peers))
i := 0
for _, peer := range ps.peers {
list[i] = peer
i++
}
return list
}
// Close disconnects all peers.
// No new peers can be registered after Close has returned.
func (ps *peerSet) Close() {
ps.lock.Lock()
defer ps.lock.Unlock()
for _, p := range ps.peers {
p.Disconnect(p2p.DiscQuitting)
}
ps.closed = true
}
| les/peer.go | 1 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.9546393752098083,
0.012536067515611649,
0.00016230142500717193,
0.00017435054178349674,
0.1047060564160347
] |
{
"id": 3,
"code_window": [
"\t\trw: rw,\n",
"\t\tversion: version,\n",
"\t\tnetwork: network,\n",
"\t\tid: fmt.Sprintf(\"%x\", id),\n",
"\t\tisTrusted: isTrusted,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\tid: fmt.Sprintf(\"%x\", p.ID().Bytes()),\n"
],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 107
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package params
// These are the multipliers for ether denominations.
// Example: To get the wei value of an amount in 'gwei', use
//
// new(big.Int).Mul(value, big.NewInt(params.GWei))
//
const (
Wei = 1
GWei = 1e9
Ether = 1e18
)
| params/denomination.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00017722643679007888,
0.0001764084881870076,
0.0001755620032781735,
0.00017643699538893998,
6.798012464059866e-7
] |
{
"id": 3,
"code_window": [
"\t\trw: rw,\n",
"\t\tversion: version,\n",
"\t\tnetwork: network,\n",
"\t\tid: fmt.Sprintf(\"%x\", id),\n",
"\t\tisTrusted: isTrusted,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\tid: fmt.Sprintf(\"%x\", p.ID().Bytes()),\n"
],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 107
} | package selected
import (
"fmt"
"reflect"
"sync"
"github.com/graph-gophers/graphql-go/errors"
"github.com/graph-gophers/graphql-go/internal/common"
"github.com/graph-gophers/graphql-go/internal/exec/packer"
"github.com/graph-gophers/graphql-go/internal/exec/resolvable"
"github.com/graph-gophers/graphql-go/internal/query"
"github.com/graph-gophers/graphql-go/internal/schema"
"github.com/graph-gophers/graphql-go/introspection"
)
type Request struct {
Schema *schema.Schema
Doc *query.Document
Vars map[string]interface{}
Mu sync.Mutex
Errs []*errors.QueryError
}
func (r *Request) AddError(err *errors.QueryError) {
r.Mu.Lock()
r.Errs = append(r.Errs, err)
r.Mu.Unlock()
}
func ApplyOperation(r *Request, s *resolvable.Schema, op *query.Operation) []Selection {
var obj *resolvable.Object
switch op.Type {
case query.Query:
obj = s.Query.(*resolvable.Object)
case query.Mutation:
obj = s.Mutation.(*resolvable.Object)
}
return applySelectionSet(r, obj, op.Selections)
}
type Selection interface {
isSelection()
}
type SchemaField struct {
resolvable.Field
Alias string
Args map[string]interface{}
PackedArgs reflect.Value
Sels []Selection
Async bool
FixedResult reflect.Value
}
type TypeAssertion struct {
resolvable.TypeAssertion
Sels []Selection
}
type TypenameField struct {
resolvable.Object
Alias string
}
func (*SchemaField) isSelection() {}
func (*TypeAssertion) isSelection() {}
func (*TypenameField) isSelection() {}
func applySelectionSet(r *Request, e *resolvable.Object, sels []query.Selection) (flattenedSels []Selection) {
for _, sel := range sels {
switch sel := sel.(type) {
case *query.Field:
field := sel
if skipByDirective(r, field.Directives) {
continue
}
switch field.Name.Name {
case "__typename":
flattenedSels = append(flattenedSels, &TypenameField{
Object: *e,
Alias: field.Alias.Name,
})
case "__schema":
flattenedSels = append(flattenedSels, &SchemaField{
Field: resolvable.MetaFieldSchema,
Alias: field.Alias.Name,
Sels: applySelectionSet(r, resolvable.MetaSchema, field.Selections),
Async: true,
FixedResult: reflect.ValueOf(introspection.WrapSchema(r.Schema)),
})
case "__type":
p := packer.ValuePacker{ValueType: reflect.TypeOf("")}
v, err := p.Pack(field.Arguments.MustGet("name").Value(r.Vars))
if err != nil {
r.AddError(errors.Errorf("%s", err))
return nil
}
t, ok := r.Schema.Types[v.String()]
if !ok {
return nil
}
flattenedSels = append(flattenedSels, &SchemaField{
Field: resolvable.MetaFieldType,
Alias: field.Alias.Name,
Sels: applySelectionSet(r, resolvable.MetaType, field.Selections),
Async: true,
FixedResult: reflect.ValueOf(introspection.WrapType(t)),
})
default:
fe := e.Fields[field.Name.Name]
var args map[string]interface{}
var packedArgs reflect.Value
if fe.ArgsPacker != nil {
args = make(map[string]interface{})
for _, arg := range field.Arguments {
args[arg.Name.Name] = arg.Value.Value(r.Vars)
}
var err error
packedArgs, err = fe.ArgsPacker.Pack(args)
if err != nil {
r.AddError(errors.Errorf("%s", err))
return
}
}
fieldSels := applyField(r, fe.ValueExec, field.Selections)
flattenedSels = append(flattenedSels, &SchemaField{
Field: *fe,
Alias: field.Alias.Name,
Args: args,
PackedArgs: packedArgs,
Sels: fieldSels,
Async: fe.HasContext || fe.ArgsPacker != nil || fe.HasError || HasAsyncSel(fieldSels),
})
}
case *query.InlineFragment:
frag := sel
if skipByDirective(r, frag.Directives) {
continue
}
flattenedSels = append(flattenedSels, applyFragment(r, e, &frag.Fragment)...)
case *query.FragmentSpread:
spread := sel
if skipByDirective(r, spread.Directives) {
continue
}
flattenedSels = append(flattenedSels, applyFragment(r, e, &r.Doc.Fragments.Get(spread.Name.Name).Fragment)...)
default:
panic("invalid type")
}
}
return
}
func applyFragment(r *Request, e *resolvable.Object, frag *query.Fragment) []Selection {
if frag.On.Name != "" && frag.On.Name != e.Name {
a, ok := e.TypeAssertions[frag.On.Name]
if !ok {
panic(fmt.Errorf("%q does not implement %q", frag.On, e.Name)) // TODO proper error handling
}
return []Selection{&TypeAssertion{
TypeAssertion: *a,
Sels: applySelectionSet(r, a.TypeExec.(*resolvable.Object), frag.Selections),
}}
}
return applySelectionSet(r, e, frag.Selections)
}
func applyField(r *Request, e resolvable.Resolvable, sels []query.Selection) []Selection {
switch e := e.(type) {
case *resolvable.Object:
return applySelectionSet(r, e, sels)
case *resolvable.List:
return applyField(r, e.Elem, sels)
case *resolvable.Scalar:
return nil
default:
panic("unreachable")
}
}
func skipByDirective(r *Request, directives common.DirectiveList) bool {
if d := directives.Get("skip"); d != nil {
p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
v, err := p.Pack(d.Args.MustGet("if").Value(r.Vars))
if err != nil {
r.AddError(errors.Errorf("%s", err))
}
if err == nil && v.Bool() {
return true
}
}
if d := directives.Get("include"); d != nil {
p := packer.ValuePacker{ValueType: reflect.TypeOf(false)}
v, err := p.Pack(d.Args.MustGet("if").Value(r.Vars))
if err != nil {
r.AddError(errors.Errorf("%s", err))
}
if err == nil && !v.Bool() {
return true
}
}
return false
}
func HasAsyncSel(sels []Selection) bool {
for _, sel := range sels {
switch sel := sel.(type) {
case *SchemaField:
if sel.Async {
return true
}
case *TypeAssertion:
if HasAsyncSel(sel.Sels) {
return true
}
case *TypenameField:
// sync
default:
panic("unreachable")
}
}
return false
}
| vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00018034085223916918,
0.00017173087690025568,
0.00016542625962756574,
0.0001715344114927575,
0.0000030403984965232667
] |
{
"id": 3,
"code_window": [
"\t\trw: rw,\n",
"\t\tversion: version,\n",
"\t\tnetwork: network,\n",
"\t\tid: fmt.Sprintf(\"%x\", id),\n",
"\t\tisTrusted: isTrusted,\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep"
],
"after_edit": [
"\t\tid: fmt.Sprintf(\"%x\", p.ID().Bytes()),\n"
],
"file_path": "les/peer.go",
"type": "replace",
"edit_start_line_idx": 107
} | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gen.go
// Package identifier defines the contract between implementations of Encoding
// and Index by defining identifiers that uniquely identify standardized coded
// character sets (CCS) and character encoding schemes (CES), which we will
// together refer to as encodings, for which Encoding implementations provide
// converters to and from UTF-8. This package is typically only of concern to
// implementers of Indexes and Encodings.
//
// One part of the identifier is the MIB code, which is defined by IANA and
// uniquely identifies a CCS or CES. Each code is associated with data that
// references authorities, official documentation as well as aliases and MIME
// names.
//
// Not all CESs are covered by the IANA registry. The "other" string that is
// returned by ID can be used to identify other character sets or versions of
// existing ones.
//
// It is recommended that each package that provides a set of Encodings provide
// the All and Common variables to reference all supported encodings and
// commonly used subset. This allows Index implementations to include all
// available encodings without explicitly referencing or knowing about them.
package identifier
// Note: this package is internal, but could be made public if there is a need
// for writing third-party Indexes and Encodings.
// References:
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
// - http://www.ietf.org/rfc/rfc2978.txt
// - http://www.unicode.org/reports/tr22/
// - http://www.w3.org/TR/encoding/
// - https://encoding.spec.whatwg.org/
// - https://encoding.spec.whatwg.org/encodings.json
// - https://tools.ietf.org/html/rfc6657#section-5
// Interface can be implemented by Encodings to define the CCS or CES for which
// it implements conversions.
type Interface interface {
// ID returns an encoding identifier. Exactly one of the mib and other
// values should be non-zero.
//
// In the usual case it is only necessary to indicate the MIB code. The
// other string can be used to specify encodings for which there is no MIB,
// such as "x-mac-dingbat".
//
// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
ID() (mib MIB, other string)
// NOTE: the restrictions on the encoding are to allow extending the syntax
// with additional information such as versions, vendors and other variants.
}
// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
// some identifiers for some encodings that are not covered by the IANA
// standard.
//
// See http://www.iana.org/assignments/ianacharset-mib.
type MIB uint16
// These additional MIB types are not defined in IANA. They are added because
// they are common and defined within the text repo.
const (
// Unofficial marks the start of encodings not registered by IANA.
Unofficial MIB = 10000 + iota
// Replacement is the WhatWG replacement encoding.
Replacement
// XUserDefined is the code for x-user-defined.
XUserDefined
// MacintoshCyrillic is the code for x-mac-cyrillic.
MacintoshCyrillic
)
| vendor/golang.org/x/text/encoding/internal/identifier/identifier.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.002557415282353759,
0.0004818284360226244,
0.00016202233382500708,
0.0001733010431053117,
0.0007440326735377312
] |
{
"id": 4,
"code_window": [
"\t\tisTrusted: isTrusted,\n",
"\t}\n",
"}\n",
"\n",
"// rejectUpdate returns true if a parameter update has to be rejected because\n",
"// the size and/or rate of updates exceed the capacity limitation\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrCh: make(chan error, 1),\n"
],
"file_path": "les/peer.go",
"type": "add",
"edit_start_line_idx": 109
} | // Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package les
import (
"encoding/binary"
"encoding/json"
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
)
const (
softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header
ethVersion = 63 // equivalent eth version for the downloader
MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request
MaxBodyFetch = 32 // Amount of block bodies to be fetched per retrieval request
MaxReceiptFetch = 128 // Amount of transaction receipts to allow fetching per request
MaxCodeFetch = 64 // Amount of contract codes to allow fetching per request
MaxProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
MaxHelperTrieProofsFetch = 64 // Amount of merkle proofs to be fetched per retrieval request
MaxTxSend = 64 // Amount of transactions to be sent per request
MaxTxStatus = 256 // Amount of transactions to be queried per request
disableClientRemovePeer = false
)
func errResp(code errCode, format string, v ...interface{}) error {
return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
}
type BlockChain interface {
Config() *params.ChainConfig
HasHeader(hash common.Hash, number uint64) bool
GetHeader(hash common.Hash, number uint64) *types.Header
GetHeaderByHash(hash common.Hash) *types.Header
CurrentHeader() *types.Header
GetTd(hash common.Hash, number uint64) *big.Int
StateCache() state.Database
InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error)
Rollback(chain []common.Hash)
GetHeaderByNumber(number uint64) *types.Header
GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64)
Genesis() *types.Block
SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription
}
type txPool interface {
AddRemotes(txs []*types.Transaction) []error
Status(hashes []common.Hash) []core.TxStatus
}
type ProtocolManager struct {
lightSync bool
txpool txPool
txrelay *LesTxRelay
networkId uint64
chainConfig *params.ChainConfig
iConfig *light.IndexerConfig
blockchain BlockChain
chainDb ethdb.Database
odr *LesOdr
server *LesServer
serverPool *serverPool
lesTopic discv5.Topic
reqDist *requestDistributor
retriever *retrieveManager
servingQueue *servingQueue
downloader *downloader.Downloader
fetcher *lightFetcher
peers *peerSet
maxPeers int
eventMux *event.TypeMux
// channels for fetcher, syncer, txsyncLoop
newPeerCh chan *peer
quitSync chan struct{}
noMorePeers chan struct{}
// wait group is used for graceful shutdowns during downloading
// and processing
wg *sync.WaitGroup
ulc *ulc
}
// NewProtocolManager returns a new Ethereum sub-protocol manager. The Ethereum sub-protocol manages peers capable
// of communicating with the Ethereum network.
func NewProtocolManager(
chainConfig *params.ChainConfig,
indexerConfig *light.IndexerConfig,
lightSync bool,
networkId uint64,
mux *event.TypeMux,
engine consensus.Engine,
peers *peerSet,
blockchain BlockChain,
txpool txPool,
chainDb ethdb.Database,
odr *LesOdr,
txrelay *LesTxRelay,
serverPool *serverPool,
quitSync chan struct{},
wg *sync.WaitGroup,
ulcConfig *eth.ULCConfig) (*ProtocolManager, error) {
// Create the protocol manager with the base fields
manager := &ProtocolManager{
lightSync: lightSync,
eventMux: mux,
blockchain: blockchain,
chainConfig: chainConfig,
iConfig: indexerConfig,
chainDb: chainDb,
odr: odr,
networkId: networkId,
txpool: txpool,
txrelay: txrelay,
serverPool: serverPool,
peers: peers,
newPeerCh: make(chan *peer),
quitSync: quitSync,
wg: wg,
noMorePeers: make(chan struct{}),
}
if odr != nil {
manager.retriever = odr.retriever
manager.reqDist = odr.retriever.dist
} else {
manager.servingQueue = newServingQueue(int64(time.Millisecond * 10))
}
if ulcConfig != nil {
manager.ulc = newULC(ulcConfig)
}
removePeer := manager.removePeer
if disableClientRemovePeer {
removePeer = func(id string) {}
}
if lightSync {
manager.downloader = downloader.New(downloader.LightSync, chainDb, manager.eventMux, nil, blockchain, removePeer)
manager.peers.notify((*downloaderPeerNotify)(manager))
manager.fetcher = newLightFetcher(manager)
}
return manager, nil
}
// removePeer initiates disconnection from a peer by removing it from the peer set
func (pm *ProtocolManager) removePeer(id string) {
pm.peers.Unregister(id)
}
func (pm *ProtocolManager) Start(maxPeers int) {
pm.maxPeers = maxPeers
if pm.lightSync {
go pm.syncer()
} else {
go func() {
for range pm.newPeerCh {
}
}()
}
}
func (pm *ProtocolManager) Stop() {
// Showing a log message. During download / process this could actually
// take between 5 and 10 seconds and therefore feedback is required.
log.Info("Stopping light Ethereum protocol")
// Quit the sync loop.
// After this send has completed, no new peers will be accepted.
pm.noMorePeers <- struct{}{}
close(pm.quitSync) // quits syncer, fetcher
if pm.servingQueue != nil {
pm.servingQueue.stop()
}
// Disconnect existing sessions.
// This also closes the gate for any new registrations on the peer set.
// sessions which are already established but not added to pm.peers yet
// will exit when they try to register.
pm.peers.Close()
// Wait for any process action
pm.wg.Wait()
log.Info("Light Ethereum protocol stopped")
}
// runPeer is the p2p protocol run function for the given version.
func (pm *ProtocolManager) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {
var entry *poolEntry
peer := pm.newPeer(int(version), pm.networkId, p, rw)
if pm.serverPool != nil {
entry = pm.serverPool.connect(peer, peer.Node())
}
peer.poolEntry = entry
select {
case pm.newPeerCh <- peer:
pm.wg.Add(1)
defer pm.wg.Done()
err := pm.handle(peer)
if entry != nil {
pm.serverPool.disconnect(entry)
}
return err
case <-pm.quitSync:
if entry != nil {
pm.serverPool.disconnect(entry)
}
return p2p.DiscQuitting
}
}
func (pm *ProtocolManager) newPeer(pv int, nv uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
var isTrusted bool
if pm.isULCEnabled() {
isTrusted = pm.ulc.isTrusted(p.ID())
}
return newPeer(pv, nv, isTrusted, p, newMeteredMsgWriter(rw))
}
// handle is the callback invoked to manage the life cycle of a les peer. When
// this function terminates, the peer is disconnected.
func (pm *ProtocolManager) handle(p *peer) error {
// Ignore maxPeers if this is a trusted peer
// In server mode we try to check into the client pool after handshake
if pm.lightSync && pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
return p2p.DiscTooManyPeers
}
p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
// Execute the LES handshake
var (
genesis = pm.blockchain.Genesis()
head = pm.blockchain.CurrentHeader()
hash = head.Hash()
number = head.Number.Uint64()
td = pm.blockchain.GetTd(hash, number)
)
if err := p.Handshake(td, hash, number, genesis.Hash(), pm.server); err != nil {
p.Log().Debug("Light Ethereum handshake failed", "err", err)
return err
}
if p.fcClient != nil {
defer p.fcClient.Disconnect()
}
if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
rw.Init(p.version)
}
// Register the peer locally
if err := pm.peers.Register(p); err != nil {
p.Log().Error("Light Ethereum peer registration failed", "err", err)
return err
}
defer func() {
pm.removePeer(p.id)
}()
// Register the peer in the downloader. If the downloader considers it banned, we disconnect
if pm.lightSync {
p.lock.Lock()
head := p.headInfo
p.lock.Unlock()
if pm.fetcher != nil {
pm.fetcher.announce(p, head)
}
if p.poolEntry != nil {
pm.serverPool.registered(p.poolEntry)
}
}
// main loop. handle incoming messages.
for {
if err := pm.handleMsg(p); err != nil {
p.Log().Debug("Light Ethereum message handling failed", "err", err)
if p.fcServer != nil {
p.fcServer.DumpLogs()
}
return err
}
}
}
// handleMsg is invoked whenever an inbound message is received from a remote
// peer. The remote connection is torn down upon returning any error.
func (pm *ProtocolManager) handleMsg(p *peer) error {
// Read the next message from the remote peer, and ensure it's fully consumed
msg, err := p.rw.ReadMsg()
if err != nil {
return err
}
p.Log().Trace("Light Ethereum message arrived", "code", msg.Code, "bytes", msg.Size)
p.responseCount++
responseCount := p.responseCount
var (
maxCost uint64
task *servingTask
)
accept := func(reqID, reqCnt, maxCnt uint64) bool {
if reqCnt == 0 {
return false
}
if p.fcClient == nil || reqCnt > maxCnt {
return false
}
maxCost = p.fcCosts.getCost(msg.Code, reqCnt)
if accepted, bufShort, servingPriority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost); !accepted {
if bufShort > 0 {
p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))
}
return false
} else {
task = pm.servingQueue.newTask(servingPriority)
}
return task.start()
}
if msg.Size > ProtocolMaxMsgSize {
return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
}
defer msg.Discard()
var deliverMsg *Msg
sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) {
p.responseLock.Lock()
defer p.responseLock.Unlock()
var replySize uint32
if reply != nil {
replySize = reply.size()
}
var realCost uint64
if pm.server.costTracker != nil {
realCost = pm.server.costTracker.realCost(servingTime, msg.Size, replySize)
pm.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost)
} else {
realCost = maxCost
}
bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)
if reply != nil {
p.queueSend(func() {
if err := reply.send(bv); err != nil {
p.errCh <- err
}
})
}
}
// Handle the message depending on its contents
switch msg.Code {
case StatusMsg:
p.Log().Trace("Received status message")
// Status messages should never arrive after the handshake
return errResp(ErrExtraStatusMsg, "uncontrolled status message")
// Block header query, collect the requested headers and reply
case AnnounceMsg:
p.Log().Trace("Received announce message")
var req announceData
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
update, size := req.Update.decode()
if p.rejectUpdate(size) {
return errResp(ErrRequestRejected, "")
}
p.updateFlowControl(update)
if req.Hash != (common.Hash{}) {
if p.announceType == announceTypeNone {
return errResp(ErrUnexpectedResponse, "")
}
if p.announceType == announceTypeSigned {
if err := req.checkSignature(p.ID(), update); err != nil {
p.Log().Trace("Invalid announcement signature", "err", err)
return err
}
p.Log().Trace("Valid announcement signature")
}
p.Log().Trace("Announce message content", "number", req.Number, "hash", req.Hash, "td", req.Td, "reorg", req.ReorgDepth)
if pm.fetcher != nil {
pm.fetcher.announce(p, &req)
}
}
case GetBlockHeadersMsg:
p.Log().Trace("Received block header request")
// Decode the complex header query
var req struct {
ReqID uint64
Query getBlockHeadersData
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
query := req.Query
if !accept(req.ReqID, query.Amount, MaxHeaderFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
hashMode := query.Origin.Hash != (common.Hash{})
first := true
maxNonCanonical := uint64(100)
// Gather headers until the fetch or network limit is reached
var (
bytes common.StorageSize
headers []*types.Header
unknown bool
)
for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit {
if !first && !task.waitOrStop() {
return
}
// Retrieve the next header satisfying the query
var origin *types.Header
if hashMode {
if first {
origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
if origin != nil {
query.Origin.Number = origin.Number.Uint64()
}
} else {
origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number)
}
} else {
origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
}
if origin == nil {
break
}
headers = append(headers, origin)
bytes += estHeaderRlpSize
// Advance to the next header of the query
switch {
case hashMode && query.Reverse:
// Hash based traversal towards the genesis block
ancestor := query.Skip + 1
if ancestor == 0 {
unknown = true
} else {
query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)
unknown = (query.Origin.Hash == common.Hash{})
}
case hashMode && !query.Reverse:
// Hash based traversal towards the leaf block
var (
current = origin.Number.Uint64()
next = current + query.Skip + 1
)
if next <= current {
infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ")
p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
unknown = true
} else {
if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
nextHash := header.Hash()
expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)
if expOldHash == query.Origin.Hash {
query.Origin.Hash, query.Origin.Number = nextHash, next
} else {
unknown = true
}
} else {
unknown = true
}
}
case query.Reverse:
// Number based traversal towards the genesis block
if query.Origin.Number >= query.Skip+1 {
query.Origin.Number -= query.Skip + 1
} else {
unknown = true
}
case !query.Reverse:
// Number based traversal towards the leaf block
query.Origin.Number += query.Skip + 1
}
first = false
}
sendResponse(req.ReqID, query.Amount, p.ReplyBlockHeaders(req.ReqID, headers), task.done())
}()
case BlockHeadersMsg:
if pm.downloader == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received block header response message")
// A batch of headers arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Headers []*types.Header
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
if pm.fetcher != nil && pm.fetcher.requestedID(resp.ReqID) {
pm.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)
} else {
err := pm.downloader.DeliverHeaders(p.id, resp.Headers)
if err != nil {
log.Debug(fmt.Sprint(err))
}
}
case GetBlockBodiesMsg:
p.Log().Trace("Received block bodies request")
// Decode the retrieval message
var req struct {
ReqID uint64
Hashes []common.Hash
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather blocks until the fetch or network limit is reached
var (
bytes int
bodies []rlp.RawValue
)
reqCnt := len(req.Hashes)
if !accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, hash := range req.Hashes {
if i != 0 && !task.waitOrStop() {
return
}
if bytes >= softResponseLimit {
break
}
// Retrieve the requested block body, stopping if enough was found
if number := rawdb.ReadHeaderNumber(pm.chainDb, hash); number != nil {
if data := rawdb.ReadBodyRLP(pm.chainDb, hash, *number); len(data) != 0 {
bodies = append(bodies, data)
bytes += len(data)
}
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyBlockBodiesRLP(req.ReqID, bodies), task.done())
}()
case BlockBodiesMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received block bodies response")
// A batch of block bodies arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Data []*types.Body
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgBlockBodies,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case GetCodeMsg:
p.Log().Trace("Received code request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []CodeReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limit is reached
var (
bytes int
data [][]byte
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
if number == nil {
p.Log().Warn("Failed to retrieve block num for code", "hash", req.BHash)
continue
}
header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
if header == nil {
p.Log().Warn("Failed to retrieve header for code", "block", *number, "hash", req.BHash)
continue
}
triedb := pm.blockchain.StateCache().TrieDB()
account, err := pm.getAccount(triedb, header.Root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
code, err := triedb.Node(common.BytesToHash(account.CodeHash))
if err != nil {
p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err)
continue
}
// Accumulate the code and abort if enough data was retrieved
data = append(data, code)
if bytes += len(code); bytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyCode(req.ReqID, data), task.done())
}()
case CodeMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received code response")
// A batch of node state data arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Data [][]byte
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgCode,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case GetReceiptsMsg:
p.Log().Trace("Received receipts request")
// Decode the retrieval message
var req struct {
ReqID uint64
Hashes []common.Hash
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limit is reached
var (
bytes int
receipts []rlp.RawValue
)
reqCnt := len(req.Hashes)
if !accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, hash := range req.Hashes {
if i != 0 && !task.waitOrStop() {
return
}
if bytes >= softResponseLimit {
break
}
// Retrieve the requested block's receipts, skipping if unknown to us
var results types.Receipts
if number := rawdb.ReadHeaderNumber(pm.chainDb, hash); number != nil {
results = rawdb.ReadReceipts(pm.chainDb, hash, *number)
}
if results == nil {
if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
continue
}
}
// If known, encode and queue for response packet
if encoded, err := rlp.EncodeToBytes(results); err != nil {
log.Error("Failed to encode receipt", "err", err)
} else {
receipts = append(receipts, encoded)
bytes += len(encoded)
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyReceiptsRLP(req.ReqID, receipts), task.done())
}()
case ReceiptsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received receipts response")
// A batch of receipts arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Receipts []types.Receipts
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgReceipts,
ReqID: resp.ReqID,
Obj: resp.Receipts,
}
case GetProofsV1Msg:
p.Log().Trace("Received proofs request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ProofReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limit is reached
var (
bytes int
proofs proofsData
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
number := rawdb.ReadHeaderNumber(pm.chainDb, req.BHash)
if number == nil {
p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
continue
}
header := rawdb.ReadHeader(pm.chainDb, req.BHash, *number)
if header == nil {
p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
continue
}
// Open the account or storage trie for the request
statedb := pm.blockchain.StateCache()
var trie state.Trie
switch len(req.AccKey) {
case 0:
// No account key specified, open an account trie
trie, err = statedb.OpenTrie(header.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", header.Root, "err", err)
continue
}
default:
// Account key specified, open a storage trie
account, err := pm.getAccount(statedb.TrieDB(), header.Root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
continue
}
}
// Prove the user's request from the account or storage trie
var proof light.NodeList
if err := trie.Prove(req.Key, 0, &proof); err != nil {
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
continue
}
proofs = append(proofs, proof)
if bytes += proof.DataSize(); bytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofs(req.ReqID, proofs), task.done())
}()
case GetProofsV2Msg:
p.Log().Trace("Received les/2 proofs request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ProofReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limit is reached
var (
lastBHash common.Hash
root common.Hash
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
nodes := light.NewNodeSet()
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
// Look up the root hash belonging to the request
var (
number *uint64
header *types.Header
trie state.Trie
)
if req.BHash != lastBHash {
root, lastBHash = common.Hash{}, req.BHash
if number = rawdb.ReadHeaderNumber(pm.chainDb, req.BHash); number == nil {
p.Log().Warn("Failed to retrieve block num for proof", "hash", req.BHash)
continue
}
if header = rawdb.ReadHeader(pm.chainDb, req.BHash, *number); header == nil {
p.Log().Warn("Failed to retrieve header for proof", "block", *number, "hash", req.BHash)
continue
}
root = header.Root
}
// Open the account or storage trie for the request
statedb := pm.blockchain.StateCache()
switch len(req.AccKey) {
case 0:
// No account key specified, open an account trie
trie, err = statedb.OpenTrie(root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err)
continue
}
default:
// Account key specified, open a storage trie
account, err := pm.getAccount(statedb.TrieDB(), root, common.BytesToHash(req.AccKey))
if err != nil {
p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "err", err)
continue
}
trie, err = statedb.OpenStorageTrie(common.BytesToHash(req.AccKey), account.Root)
if trie == nil || err != nil {
p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(req.AccKey), "root", account.Root, "err", err)
continue
}
}
// Prove the user's request from the account or storage trie
if err := trie.Prove(req.Key, req.FromLevel, nodes); err != nil {
p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err)
continue
}
if nodes.DataSize() >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyProofsV2(req.ReqID, nodes.NodeList()), task.done())
}()
case ProofsV1Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received proofs response")
// A batch of merkle proofs arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Data []light.NodeList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgProofsV1,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case ProofsV2Msg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received les/2 proofs response")
// A batch of merkle proofs arrived to one of our previous requests
var resp struct {
ReqID, BV uint64
Data light.NodeList
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgProofsV2,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case GetHeaderProofsMsg:
p.Log().Trace("Received headers proof request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []ChtReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limit is reached
var (
bytes int
proofs []ChtResp
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix))
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil {
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1)
if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) {
trie, err := trie.New(root, trieDb)
if err != nil {
continue
}
var encNumber [8]byte
binary.BigEndian.PutUint64(encNumber[:], req.BlockNum)
var proof light.NodeList
trie.Prove(encNumber[:], 0, &proof)
proofs = append(proofs, ChtResp{Header: header, Proof: proof})
if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit {
break
}
}
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHeaderProofs(req.ReqID, proofs), task.done())
}()
case GetHelperTrieProofsMsg:
p.Log().Trace("Received helper trie proof request")
// Decode the retrieval message
var req struct {
ReqID uint64
Reqs []HelperTrieReq
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
// Gather state data until the fetch or network limit is reached
var (
auxBytes int
auxData [][]byte
)
reqCnt := len(req.Reqs)
if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) {
return errResp(ErrRequestRejected, "")
}
go func() {
var (
lastIdx uint64
lastType uint
root common.Hash
auxTrie *trie.Trie
)
nodes := light.NewNodeSet()
for i, req := range req.Reqs {
if i != 0 && !task.waitOrStop() {
return
}
if auxTrie == nil || req.Type != lastType || req.TrieIdx != lastIdx {
auxTrie, lastType, lastIdx = nil, req.Type, req.TrieIdx
var prefix string
if root, prefix = pm.getHelperTrie(req.Type, req.TrieIdx); root != (common.Hash{}) {
auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(pm.chainDb, prefix)))
}
}
if req.AuxReq == auxRoot {
var data []byte
if root != (common.Hash{}) {
data = root[:]
}
auxData = append(auxData, data)
auxBytes += len(data)
} else {
if auxTrie != nil {
auxTrie.Prove(req.Key, req.FromLevel, nodes)
}
if req.AuxReq != 0 {
data := pm.getHelperTrieAuxData(req)
auxData = append(auxData, data)
auxBytes += len(data)
}
}
if nodes.DataSize()+auxBytes >= softResponseLimit {
break
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}), task.done())
}()
case HeaderProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received headers proof response")
var resp struct {
ReqID, BV uint64
Data []ChtResp
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgHeaderProofs,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case HelperTrieProofsMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received helper trie proof response")
var resp struct {
ReqID, BV uint64
Data HelperTrieResps
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
deliverMsg = &Msg{
MsgType: MsgHelperTrieProofs,
ReqID: resp.ReqID,
Obj: resp.Data,
}
case SendTxMsg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")
}
// Transactions arrived, parse all of them and deliver to the pool
var txs []*types.Transaction
if err := msg.Decode(&txs); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(txs)
if !accept(0, uint64(reqCnt), MaxTxSend) {
return errResp(ErrRequestRejected, "")
}
go func() {
for i, tx := range txs {
if i != 0 && !task.waitOrStop() {
return
}
pm.txpool.AddRemotes([]*types.Transaction{tx})
}
sendResponse(0, uint64(reqCnt), nil, task.done())
}()
case SendTxV2Msg:
if pm.txpool == nil {
return errResp(ErrRequestRejected, "")
}
// Transactions arrived, parse all of them and deliver to the pool
var req struct {
ReqID uint64
Txs []*types.Transaction
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(req.Txs)
if !accept(req.ReqID, uint64(reqCnt), MaxTxSend) {
return errResp(ErrRequestRejected, "")
}
go func() {
stats := make([]txStatus, len(req.Txs))
for i, tx := range req.Txs {
if i != 0 && !task.waitOrStop() {
return
}
hash := tx.Hash()
stats[i] = pm.txStatus(hash)
if stats[i].Status == core.TxStatusUnknown {
if errs := pm.txpool.AddRemotes([]*types.Transaction{tx}); errs[0] != nil {
stats[i].Error = errs[0].Error()
continue
}
stats[i] = pm.txStatus(hash)
}
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyTxStatus(req.ReqID, stats), task.done())
}()
case GetTxStatusMsg:
if pm.txpool == nil {
return errResp(ErrUnexpectedResponse, "")
}
// Transaction status queries arrived, look up the status of each requested hash
var req struct {
ReqID uint64
Hashes []common.Hash
}
if err := msg.Decode(&req); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
reqCnt := len(req.Hashes)
if !accept(req.ReqID, uint64(reqCnt), MaxTxStatus) {
return errResp(ErrRequestRejected, "")
}
go func() {
stats := make([]txStatus, len(req.Hashes))
for i, hash := range req.Hashes {
if i != 0 && !task.waitOrStop() {
return
}
stats[i] = pm.txStatus(hash)
}
sendResponse(req.ReqID, uint64(reqCnt), p.ReplyTxStatus(req.ReqID, stats), task.done())
}()
case TxStatusMsg:
if pm.odr == nil {
return errResp(ErrUnexpectedResponse, "")
}
p.Log().Trace("Received tx status response")
var resp struct {
ReqID, BV uint64
Status []txStatus
}
if err := msg.Decode(&resp); err != nil {
return errResp(ErrDecode, "msg %v: %v", msg, err)
}
p.fcServer.ReceivedReply(resp.ReqID, resp.BV)
default:
p.Log().Trace("Received unknown message", "code", msg.Code)
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
}
if deliverMsg != nil {
err := pm.retriever.deliver(p, deliverMsg)
if err != nil {
p.responseErrors++
if p.responseErrors > maxResponseErrors {
return err
}
}
}
return nil
}
// getAccount retrieves an account from the state trie rooted at the given root.
func (pm *ProtocolManager) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {
trie, err := trie.New(root, triedb)
if err != nil {
return state.Account{}, err
}
blob, err := trie.TryGet(hash[:])
if err != nil {
return state.Account{}, err
}
var account state.Account
if err = rlp.DecodeBytes(blob, &account); err != nil {
return state.Account{}, err
}
return account, nil
}
// getHelperTrie returns the post-processed trie root for the given trie ID and section index
func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) {
switch id {
case htCanonical:
idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1)
return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix
case htBloomBits:
sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1)
return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix
}
return common.Hash{}, ""
}
// getHelperTrieAuxData returns requested auxiliary data for the given HelperTrie request
func (pm *ProtocolManager) getHelperTrieAuxData(req HelperTrieReq) []byte {
if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 {
blockNum := binary.BigEndian.Uint64(req.Key)
hash := rawdb.ReadCanonicalHash(pm.chainDb, blockNum)
return rawdb.ReadHeaderRLP(pm.chainDb, hash, blockNum)
}
return nil
}
func (pm *ProtocolManager) txStatus(hash common.Hash) txStatus {
var stat txStatus
stat.Status = pm.txpool.Status([]common.Hash{hash})[0]
// If the transaction is unknown to the pool, try looking it up locally
if stat.Status == core.TxStatusUnknown {
if tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(pm.chainDb, hash); tx != nil {
stat.Status = core.TxStatusIncluded
stat.Lookup = &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
}
}
return stat
}
// isULCEnabled returns true if we can use ULC
func (pm *ProtocolManager) isULCEnabled() bool {
if pm.ulc == nil || len(pm.ulc.trustedKeys) == 0 {
return false
}
return true
}
// downloaderPeerNotify implements peerSetNotify
type downloaderPeerNotify ProtocolManager
type peerConnection struct {
manager *ProtocolManager
peer *peer
}
func (pc *peerConnection) Head() (common.Hash, *big.Int) {
return pc.peer.HeadAndTd()
}
func (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {
reqID := genReqID()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
peer := dp.(*peer)
return peer.GetRequestCost(GetBlockHeadersMsg, amount)
},
canSend: func(dp distPeer) bool {
return dp.(*peer) == pc.peer
},
request: func(dp distPeer) func() {
peer := dp.(*peer)
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount)
peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.RequestHeadersByHash(reqID, cost, origin, amount, skip, reverse) }
},
}
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return light.ErrNoPeers
}
return nil
}
func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {
reqID := genReqID()
rq := &distReq{
getCost: func(dp distPeer) uint64 {
peer := dp.(*peer)
return peer.GetRequestCost(GetBlockHeadersMsg, amount)
},
canSend: func(dp distPeer) bool {
return dp.(*peer) == pc.peer
},
request: func(dp distPeer) func() {
peer := dp.(*peer)
cost := peer.GetRequestCost(GetBlockHeadersMsg, amount)
peer.fcServer.QueuedRequest(reqID, cost)
return func() { peer.RequestHeadersByNumber(reqID, cost, origin, amount, skip, reverse) }
},
}
_, ok := <-pc.manager.reqDist.queue(rq)
if !ok {
return light.ErrNoPeers
}
return nil
}
func (d *downloaderPeerNotify) registerPeer(p *peer) {
pm := (*ProtocolManager)(d)
pc := &peerConnection{
manager: pm,
peer: p,
}
pm.downloader.RegisterLightPeer(p.id, ethVersion, pc)
}
func (d *downloaderPeerNotify) unregisterPeer(p *peer) {
pm := (*ProtocolManager)(d)
pm.downloader.UnregisterPeer(p.id)
}
| les/handler.go | 1 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.03763309121131897,
0.00048640501336194575,
0.00016386639617849141,
0.0001693595404503867,
0.0031935512088239193
] |
{
"id": 4,
"code_window": [
"\t\tisTrusted: isTrusted,\n",
"\t}\n",
"}\n",
"\n",
"// rejectUpdate returns true if a parameter update has to be rejected because\n",
"// the size and/or rate of updates exceed the capacity limitation\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrCh: make(chan error, 1),\n"
],
"file_path": "les/peer.go",
"type": "add",
"edit_start_line_idx": 109
} | // Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package rpc
import (
"context"
"io"
"sync/atomic"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/log"
)
const MetadataApi = "rpc"
// CodecOption specifies which type of messages a codec supports.
//
// Deprecated: this option is no longer honored by Server.
type CodecOption int
const (
// OptionMethodInvocation is an indication that the codec supports RPC method calls
OptionMethodInvocation CodecOption = 1 << iota
// OptionSubscriptions is an indication that the codec supports RPC notifications
OptionSubscriptions = 1 << iota // support pub sub
)
// Server is an RPC server.
type Server struct {
services serviceRegistry
idgen func() ID
run int32
codecs mapset.Set
}
// NewServer creates a new server instance with no registered handlers.
func NewServer() *Server {
server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1}
// Register the default service providing meta information about the RPC service such
// as the services and methods it offers.
rpcService := &RPCService{server}
server.RegisterName(MetadataApi, rpcService)
return server
}
// RegisterName creates a service for the given receiver type under the given name. When no
// methods on the given receiver match the criteria to be either an RPC method or a
// subscription, an error is returned. Otherwise a new service is created and added to the
// service collection this server provides to clients.
func (s *Server) RegisterName(name string, receiver interface{}) error {
return s.services.registerName(name, receiver)
}
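// Usage sketch (illustrative, not part of the original file): registering a
// hypothetical service. CalcService and its Add method are assumed for
// demonstration; any exported method with a supported signature is exposed
// under the "calc_" namespace (e.g. calc_add).
//
//	type CalcService struct{}
//
//	func (s *CalcService) Add(a, b int) int { return a + b }
//
//	srv := NewServer()
//	if err := srv.RegisterName("calc", new(CalcService)); err != nil {
//		// returned when no methods on the receiver match the RPC criteria
//	}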
// ServeCodec reads incoming requests from codec, calls the appropriate callback and writes
// the response back using the given codec. It will block until the codec is closed or the
// server is stopped. In either case the codec is closed.
//
// Note that codec options are no longer supported.
func (s *Server) ServeCodec(codec ServerCodec, options CodecOption) {
defer codec.Close()
// Don't serve if server is stopped.
if atomic.LoadInt32(&s.run) == 0 {
return
}
// Add the codec to the set so it can be closed by Stop.
s.codecs.Add(codec)
defer s.codecs.Remove(codec)
c := initClient(codec, s.idgen, &s.services)
<-codec.Closed()
c.Close()
}
// serveSingleRequest reads and processes a single RPC request from the given codec. This
// is used to serve HTTP connections. Subscriptions and reverse calls are not allowed in
// this mode.
func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) {
// Don't serve if server is stopped.
if atomic.LoadInt32(&s.run) == 0 {
return
}
h := newHandler(ctx, codec, s.idgen, &s.services)
h.allowSubscribe = false
defer h.close(io.EOF, nil)
reqs, batch, err := codec.Read()
if err != nil {
if err != io.EOF {
codec.Write(ctx, errorMessage(&invalidMessageError{"parse error"}))
}
return
}
if batch {
h.handleBatch(reqs)
} else {
h.handleMsg(reqs[0])
}
}
// Stop stops reading new requests, waits for stopPendingRequestTimeout to allow pending
// requests to finish, then closes all codecs which will cancel pending requests and
// subscriptions.
func (s *Server) Stop() {
if atomic.CompareAndSwapInt32(&s.run, 1, 0) {
log.Debug("RPC server shutting down")
s.codecs.Each(func(c interface{}) bool {
c.(ServerCodec).Close()
return true
})
}
}
// RPCService gives meta information about the server.
// e.g. gives information about the loaded modules.
type RPCService struct {
server *Server
}
// Modules returns the list of RPC services with their version number
func (s *RPCService) Modules() map[string]string {
s.server.services.mu.Lock()
defer s.server.services.mu.Unlock()
modules := make(map[string]string)
for name := range s.server.services.services {
modules[name] = "1.0"
}
return modules
}
| rpc/server.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.0005091977654956281,
0.00020040906383655965,
0.00016467431851197034,
0.00017374679737258703,
0.00008407450513914227
] |
{
"id": 4,
"code_window": [
"\t\tisTrusted: isTrusted,\n",
"\t}\n",
"}\n",
"\n",
"// rejectUpdate returns true if a parameter update has to be rejected because\n",
"// the size and/or rate of updates exceed the capacity limitation\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrCh: make(chan error, 1),\n"
],
"file_path": "les/peer.go",
"type": "add",
"edit_start_line_idx": 109
} | // Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package state
import (
"bytes"
"errors"
"io/ioutil"
"os"
"strings"
"testing"
)
var ErrInvalidArraySize = errors.New("invalid byte array size")
var ErrInvalidValuePersisted = errors.New("invalid value was persisted to the db")
type SerializingType struct {
key string
value string
}
func (st *SerializingType) MarshalBinary() (data []byte, err error) {
d := []byte(strings.Join([]string{st.key, st.value}, ";"))
return d, nil
}
func (st *SerializingType) UnmarshalBinary(data []byte) (err error) {
d := bytes.Split(data, []byte(";"))
l := len(d)
if l == 0 {
return ErrInvalidArraySize
}
if l == 2 {
keyLen := len(d[0])
st.key = string(d[0][:keyLen])
valLen := len(d[1])
st.value = string(d[1][:valLen])
}
return nil
}
// TestDBStore tests basic functionality of DBStore.
func TestDBStore(t *testing.T) {
dir, err := ioutil.TempDir("", "db_store_test")
if err != nil {
panic(err)
}
defer os.RemoveAll(dir)
store, err := NewDBStore(dir)
if err != nil {
t.Fatal(err)
}
testStore(t, store)
store.Close()
persistedStore, err := NewDBStore(dir)
if err != nil {
t.Fatal(err)
}
defer persistedStore.Close()
testPersistedStore(t, persistedStore)
}
func testStore(t *testing.T, store Store) {
ser := &SerializingType{key: "key1", value: "value1"}
jsonify := []string{"a", "b", "c"}
err := store.Put(ser.key, ser)
if err != nil {
t.Fatal(err)
}
err = store.Put("key2", jsonify)
if err != nil {
t.Fatal(err)
}
}
func testPersistedStore(t *testing.T, store Store) {
ser := &SerializingType{}
err := store.Get("key1", ser)
if err != nil {
t.Fatal(err)
}
if ser.key != "key1" || ser.value != "value1" {
t.Fatal(ErrInvalidValuePersisted)
}
as := []string{}
err = store.Get("key2", &as)
if err != nil {
t.Fatal(err)
}
if len(as) != 3 {
t.Fatalf("serialized array did not match expectation")
}
if as[0] != "a" || as[1] != "b" || as[2] != "c" {
t.Fatalf("elements serialized did not match expected values")
}
}
| swarm/state/dbstore_test.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.00018964307673741132,
0.00017302302876487374,
0.00016763596795499325,
0.00017235464474651963,
0.00000542315729035181
] |
{
"id": 4,
"code_window": [
"\t\tisTrusted: isTrusted,\n",
"\t}\n",
"}\n",
"\n",
"// rejectUpdate returns true if a parameter update has to be rejected because\n",
"// the size and/or rate of updates exceed the capacity limitation\n"
],
"labels": [
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\terrCh: make(chan error, 1),\n"
],
"file_path": "les/peer.go",
"type": "add",
"edit_start_line_idx": 109
} | // Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package clique implements the proof-of-authority consensus engine.
package clique
import (
"bytes"
"errors"
"io"
"math/big"
"math/rand"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/rpc"
lru "github.com/hashicorp/golang-lru"
"golang.org/x/crypto/sha3"
)
const (
checkpointInterval = 1024 // Number of blocks after which to save the vote snapshot to the database
inmemorySnapshots = 128 // Number of recent vote snapshots to keep in memory
inmemorySignatures = 4096 // Number of recent block signatures to keep in memory
wiggleTime = 500 * time.Millisecond // Random delay (per signer) to allow concurrent signers
)
// Clique proof-of-authority protocol constants.
var (
epochLength = uint64(30000) // Default number of blocks after which to checkpoint and reset the pending votes
extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity
extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal
nonceAuthVote = hexutil.MustDecode("0xffffffffffffffff") // Magic nonce number to vote on adding a new signer
nonceDropVote = hexutil.MustDecode("0x0000000000000000") // Magic nonce number to vote on removing a signer.
uncleHash = types.CalcUncleHash(nil) // Always Keccak256(RLP([])) as uncles are meaningless outside of PoW.
diffInTurn = big.NewInt(2) // Block difficulty for in-turn signatures
diffNoTurn = big.NewInt(1) // Block difficulty for out-of-turn signatures
)
// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
// errUnknownBlock is returned when the list of signers is requested for a block
// that is not part of the local blockchain.
errUnknownBlock = errors.New("unknown block")
// errInvalidCheckpointBeneficiary is returned if a checkpoint/epoch transition
// block has a beneficiary set to non-zeroes.
errInvalidCheckpointBeneficiary = errors.New("beneficiary in checkpoint block non-zero")
	// errInvalidVote is returned if a nonce value is something other than the two
// allowed constants of 0x00..0 or 0xff..f.
errInvalidVote = errors.New("vote nonce not 0x00..0 or 0xff..f")
// errInvalidCheckpointVote is returned if a checkpoint/epoch transition block
// has a vote nonce set to non-zeroes.
errInvalidCheckpointVote = errors.New("vote nonce in checkpoint block non-zero")
// errMissingVanity is returned if a block's extra-data section is shorter than
// 32 bytes, which is required to store the signer vanity.
errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing")
// errMissingSignature is returned if a block's extra-data section doesn't seem
// to contain a 65 byte secp256k1 signature.
errMissingSignature = errors.New("extra-data 65 byte signature suffix missing")
	// errExtraSigners is returned if a non-checkpoint block contains signer data in
	// its extra-data field.
errExtraSigners = errors.New("non-checkpoint block contains extra signer list")
// errInvalidCheckpointSigners is returned if a checkpoint block contains an
// invalid list of signers (i.e. non divisible by 20 bytes).
errInvalidCheckpointSigners = errors.New("invalid signer list on checkpoint block")
// errMismatchingCheckpointSigners is returned if a checkpoint block contains a
// list of signers different than the one the local node calculated.
errMismatchingCheckpointSigners = errors.New("mismatching signer list on checkpoint block")
// errInvalidMixDigest is returned if a block's mix digest is non-zero.
errInvalidMixDigest = errors.New("non-zero mix digest")
	// errInvalidUncleHash is returned if a block contains a non-empty uncle list.
errInvalidUncleHash = errors.New("non empty uncle hash")
	// errInvalidDifficulty is returned if the difficulty of a block is neither 1 nor 2.
errInvalidDifficulty = errors.New("invalid difficulty")
// errWrongDifficulty is returned if the difficulty of a block doesn't match the
// turn of the signer.
errWrongDifficulty = errors.New("wrong difficulty")
// ErrInvalidTimestamp is returned if the timestamp of a block is lower than
// the previous block's timestamp + the minimum block period.
ErrInvalidTimestamp = errors.New("invalid timestamp")
// errInvalidVotingChain is returned if an authorization list is attempted to
// be modified via out-of-range or non-contiguous headers.
errInvalidVotingChain = errors.New("invalid voting chain")
// errUnauthorizedSigner is returned if a header is signed by a non-authorized entity.
errUnauthorizedSigner = errors.New("unauthorized signer")
// errRecentlySigned is returned if a header is signed by an authorized entity
// that already signed a header recently, thus is temporarily not allowed to.
errRecentlySigned = errors.New("recently signed")
)
// SignerFn is a signer callback function to request a header to be signed by a
// backing account.
type SignerFn func(accounts.Account, string, []byte) ([]byte, error)
// ecrecover extracts the Ethereum account address from a signed header.
func ecrecover(header *types.Header, sigcache *lru.ARCCache) (common.Address, error) {
// If the signature's already cached, return that
hash := header.Hash()
if address, known := sigcache.Get(hash); known {
return address.(common.Address), nil
}
// Retrieve the signature from the header extra-data
if len(header.Extra) < extraSeal {
return common.Address{}, errMissingSignature
}
signature := header.Extra[len(header.Extra)-extraSeal:]
// Recover the public key and the Ethereum address
pubkey, err := crypto.Ecrecover(SealHash(header).Bytes(), signature)
if err != nil {
return common.Address{}, err
}
var signer common.Address
copy(signer[:], crypto.Keccak256(pubkey[1:])[12:])
sigcache.Add(hash, signer)
return signer, nil
}
// Clique is the proof-of-authority consensus engine proposed to support the
// Ethereum testnet following the Ropsten attacks.
type Clique struct {
config *params.CliqueConfig // Consensus engine configuration parameters
db ethdb.Database // Database to store and retrieve snapshot checkpoints
recents *lru.ARCCache // Snapshots for recent block to speed up reorgs
signatures *lru.ARCCache // Signatures of recent blocks to speed up mining
proposals map[common.Address]bool // Current list of proposals we are pushing
signer common.Address // Ethereum address of the signing key
signFn SignerFn // Signer function to authorize hashes with
lock sync.RWMutex // Protects the signer fields
// The fields below are for testing only
fakeDiff bool // Skip difficulty verifications
}
// New creates a Clique proof-of-authority consensus engine with the initial
// signers set to the ones provided by the user.
func New(config *params.CliqueConfig, db ethdb.Database) *Clique {
// Set any missing consensus parameters to their defaults
conf := *config
if conf.Epoch == 0 {
conf.Epoch = epochLength
}
// Allocate the snapshot caches and create the engine
recents, _ := lru.NewARC(inmemorySnapshots)
signatures, _ := lru.NewARC(inmemorySignatures)
return &Clique{
config: &conf,
db: db,
recents: recents,
signatures: signatures,
proposals: make(map[common.Address]bool),
}
}
// Author implements consensus.Engine, returning the Ethereum address recovered
// from the signature in the header's extra-data section.
func (c *Clique) Author(header *types.Header) (common.Address, error) {
return ecrecover(header, c.signatures)
}
// VerifyHeader checks whether a header conforms to the consensus rules.
func (c *Clique) VerifyHeader(chain consensus.ChainReader, header *types.Header, seal bool) error {
return c.verifyHeader(chain, header, nil)
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers. The
// method returns a quit channel to abort the operations and a results channel to
// retrieve the async verifications (the order is that of the input slice).
func (c *Clique) VerifyHeaders(chain consensus.ChainReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
abort := make(chan struct{})
results := make(chan error, len(headers))
go func() {
for i, header := range headers {
err := c.verifyHeader(chain, header, headers[:i])
select {
case <-abort:
return
case results <- err:
}
}
}()
return abort, results
}
// verifyHeader checks whether a header conforms to the consensus rules. The
// caller may optionally pass in a batch of parents (ascending order) to avoid
// looking those up from the database. This is useful for concurrently verifying
// a batch of new headers.
func (c *Clique) verifyHeader(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
if header.Number == nil {
return errUnknownBlock
}
number := header.Number.Uint64()
// Don't waste time checking blocks from the future
if header.Time.Cmp(big.NewInt(time.Now().Unix())) > 0 {
return consensus.ErrFutureBlock
}
// Checkpoint blocks need to enforce zero beneficiary
checkpoint := (number % c.config.Epoch) == 0
if checkpoint && header.Coinbase != (common.Address{}) {
return errInvalidCheckpointBeneficiary
}
// Nonces must be 0x00..0 or 0xff..f, zeroes enforced on checkpoints
if !bytes.Equal(header.Nonce[:], nonceAuthVote) && !bytes.Equal(header.Nonce[:], nonceDropVote) {
return errInvalidVote
}
if checkpoint && !bytes.Equal(header.Nonce[:], nonceDropVote) {
return errInvalidCheckpointVote
}
// Check that the extra-data contains both the vanity and signature
if len(header.Extra) < extraVanity {
return errMissingVanity
}
if len(header.Extra) < extraVanity+extraSeal {
return errMissingSignature
}
// Ensure that the extra-data contains a signer list on checkpoint, but none otherwise
signersBytes := len(header.Extra) - extraVanity - extraSeal
if !checkpoint && signersBytes != 0 {
return errExtraSigners
}
if checkpoint && signersBytes%common.AddressLength != 0 {
return errInvalidCheckpointSigners
}
// Ensure that the mix digest is zero as we don't have fork protection currently
if header.MixDigest != (common.Hash{}) {
return errInvalidMixDigest
}
// Ensure that the block doesn't contain any uncles which are meaningless in PoA
if header.UncleHash != uncleHash {
return errInvalidUncleHash
}
// Ensure that the block's difficulty is meaningful (may not be correct at this point)
if number > 0 {
if header.Difficulty == nil || (header.Difficulty.Cmp(diffInTurn) != 0 && header.Difficulty.Cmp(diffNoTurn) != 0) {
return errInvalidDifficulty
}
}
// If all checks passed, validate any special fields for hard forks
if err := misc.VerifyForkHashes(chain.Config(), header, false); err != nil {
return err
}
// All basic checks passed, verify cascading fields
return c.verifyCascadingFields(chain, header, parents)
}
// verifyCascadingFields verifies all the header fields that are not standalone,
// but rather depend on a batch of previous headers. The caller may optionally pass
// in a batch of parents (ascending order) to avoid looking those up from the
// database. This is useful for concurrently verifying a batch of new headers.
func (c *Clique) verifyCascadingFields(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
// The genesis block is the always valid dead-end
number := header.Number.Uint64()
if number == 0 {
return nil
}
	// Ensure that the block's timestamp isn't too close to its parent
var parent *types.Header
if len(parents) > 0 {
parent = parents[len(parents)-1]
} else {
parent = chain.GetHeader(header.ParentHash, number-1)
}
if parent == nil || parent.Number.Uint64() != number-1 || parent.Hash() != header.ParentHash {
return consensus.ErrUnknownAncestor
}
if parent.Time.Uint64()+c.config.Period > header.Time.Uint64() {
return ErrInvalidTimestamp
}
// Retrieve the snapshot needed to verify this header and cache it
snap, err := c.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
// If the block is a checkpoint block, verify the signer list
if number%c.config.Epoch == 0 {
signers := make([]byte, len(snap.Signers)*common.AddressLength)
for i, signer := range snap.signers() {
copy(signers[i*common.AddressLength:], signer[:])
}
extraSuffix := len(header.Extra) - extraSeal
if !bytes.Equal(header.Extra[extraVanity:extraSuffix], signers) {
return errMismatchingCheckpointSigners
}
}
// All basic checks passed, verify the seal and return
return c.verifySeal(chain, header, parents)
}
// snapshot retrieves the authorization snapshot at a given point in time.
func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) {
// Search for a snapshot in memory or on disk for checkpoints
var (
headers []*types.Header
snap *Snapshot
)
for snap == nil {
// If an in-memory snapshot was found, use that
if s, ok := c.recents.Get(hash); ok {
snap = s.(*Snapshot)
break
}
// If an on-disk checkpoint snapshot can be found, use that
if number%checkpointInterval == 0 {
if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil {
log.Trace("Loaded voting snapshot from disk", "number", number, "hash", hash)
snap = s
break
}
}
		// If we're at a checkpoint block, make a snapshot if it's known
if number == 0 || (number%c.config.Epoch == 0 && chain.GetHeaderByNumber(number-1) == nil) {
checkpoint := chain.GetHeaderByNumber(number)
if checkpoint != nil {
hash := checkpoint.Hash()
signers := make([]common.Address, (len(checkpoint.Extra)-extraVanity-extraSeal)/common.AddressLength)
for i := 0; i < len(signers); i++ {
copy(signers[i][:], checkpoint.Extra[extraVanity+i*common.AddressLength:])
}
snap = newSnapshot(c.config, c.signatures, number, hash, signers)
if err := snap.store(c.db); err != nil {
return nil, err
}
log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash)
break
}
}
// No snapshot for this header, gather the header and move backward
var header *types.Header
if len(parents) > 0 {
// If we have explicit parents, pick from there (enforced)
header = parents[len(parents)-1]
if header.Hash() != hash || header.Number.Uint64() != number {
return nil, consensus.ErrUnknownAncestor
}
parents = parents[:len(parents)-1]
} else {
// No explicit parents (or no more left), reach out to the database
header = chain.GetHeader(hash, number)
if header == nil {
return nil, consensus.ErrUnknownAncestor
}
}
headers = append(headers, header)
number, hash = number-1, header.ParentHash
}
// Previous snapshot found, apply any pending headers on top of it
for i := 0; i < len(headers)/2; i++ {
headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i]
}
snap, err := snap.apply(headers)
if err != nil {
return nil, err
}
c.recents.Add(snap.Hash, snap)
// If we've generated a new checkpoint snapshot, save to disk
if snap.Number%checkpointInterval == 0 && len(headers) > 0 {
if err = snap.store(c.db); err != nil {
return nil, err
}
log.Trace("Stored voting snapshot to disk", "number", snap.Number, "hash", snap.Hash)
}
return snap, err
}
// VerifyUncles implements consensus.Engine, always returning an error for any
// uncles as this consensus mechanism doesn't permit uncles.
func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
if len(block.Uncles()) > 0 {
return errors.New("uncles not allowed")
}
return nil
}
// VerifySeal implements consensus.Engine, checking whether the signature contained
// in the header satisfies the consensus protocol requirements.
func (c *Clique) VerifySeal(chain consensus.ChainReader, header *types.Header) error {
return c.verifySeal(chain, header, nil)
}
// verifySeal checks whether the signature contained in the header satisfies the
// consensus protocol requirements. The method accepts an optional list of parent
// headers that aren't yet part of the local blockchain to generate the snapshots
// from.
func (c *Clique) verifySeal(chain consensus.ChainReader, header *types.Header, parents []*types.Header) error {
// Verifying the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// Retrieve the snapshot needed to verify this header and cache it
snap, err := c.snapshot(chain, number-1, header.ParentHash, parents)
if err != nil {
return err
}
// Resolve the authorization key and check against signers
signer, err := ecrecover(header, c.signatures)
if err != nil {
return err
}
if _, ok := snap.Signers[signer]; !ok {
return errUnauthorizedSigner
}
for seen, recent := range snap.Recents {
if recent == signer {
// Signer is among recents, only fail if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); seen > number-limit {
return errRecentlySigned
}
}
}
// Ensure that the difficulty corresponds to the turn-ness of the signer
if !c.fakeDiff {
inturn := snap.inturn(header.Number.Uint64(), signer)
if inturn && header.Difficulty.Cmp(diffInTurn) != 0 {
return errWrongDifficulty
}
if !inturn && header.Difficulty.Cmp(diffNoTurn) != 0 {
return errWrongDifficulty
}
}
return nil
}
// Prepare implements consensus.Engine, preparing all the consensus fields of the
// header for running the transactions on top.
func (c *Clique) Prepare(chain consensus.ChainReader, header *types.Header) error {
// If the block isn't a checkpoint, cast a random vote (good enough for now)
header.Coinbase = common.Address{}
header.Nonce = types.BlockNonce{}
number := header.Number.Uint64()
// Assemble the voting snapshot to check which votes make sense
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
if number%c.config.Epoch != 0 {
c.lock.RLock()
// Gather all the proposals that make sense voting on
addresses := make([]common.Address, 0, len(c.proposals))
for address, authorize := range c.proposals {
if snap.validVote(address, authorize) {
addresses = append(addresses, address)
}
}
// If there's pending proposals, cast a vote on them
if len(addresses) > 0 {
header.Coinbase = addresses[rand.Intn(len(addresses))]
if c.proposals[header.Coinbase] {
copy(header.Nonce[:], nonceAuthVote)
} else {
copy(header.Nonce[:], nonceDropVote)
}
}
c.lock.RUnlock()
}
// Set the correct difficulty
header.Difficulty = CalcDifficulty(snap, c.signer)
	// Ensure the extra data has all its components
if len(header.Extra) < extraVanity {
header.Extra = append(header.Extra, bytes.Repeat([]byte{0x00}, extraVanity-len(header.Extra))...)
}
header.Extra = header.Extra[:extraVanity]
if number%c.config.Epoch == 0 {
for _, signer := range snap.signers() {
header.Extra = append(header.Extra, signer[:]...)
}
}
header.Extra = append(header.Extra, make([]byte, extraSeal)...)
// Mix digest is reserved for now, set to empty
header.MixDigest = common.Hash{}
// Ensure the timestamp has the correct delay
parent := chain.GetHeader(header.ParentHash, number-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Time = new(big.Int).Add(parent.Time, new(big.Int).SetUint64(c.config.Period))
if header.Time.Int64() < time.Now().Unix() {
header.Time = big.NewInt(time.Now().Unix())
}
return nil
}
// Finalize implements consensus.Engine, ensuring no uncles are set, nor block
// rewards given, and returns the final block.
func (c *Clique) Finalize(chain consensus.ChainReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// No block rewards in PoA, so the state remains as is and uncles are dropped
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
header.UncleHash = types.CalcUncleHash(nil)
// Assemble and return the final block for sealing
return types.NewBlock(header, txs, nil, receipts), nil
}
// Authorize injects a private key into the consensus engine to mint new blocks
// with.
func (c *Clique) Authorize(signer common.Address, signFn SignerFn) {
c.lock.Lock()
defer c.lock.Unlock()
c.signer = signer
c.signFn = signFn
}
// Seal implements consensus.Engine, attempting to create a sealed block using
// the local signing credentials.
func (c *Clique) Seal(chain consensus.ChainReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
header := block.Header()
// Sealing the genesis block is not supported
number := header.Number.Uint64()
if number == 0 {
return errUnknownBlock
}
// For 0-period chains, refuse to seal empty blocks (no reward but would spin sealing)
if c.config.Period == 0 && len(block.Transactions()) == 0 {
log.Info("Sealing paused, waiting for transactions")
return nil
}
// Don't hold the signer fields for the entire sealing procedure
c.lock.RLock()
signer, signFn := c.signer, c.signFn
c.lock.RUnlock()
// Bail out if we're unauthorized to sign a block
snap, err := c.snapshot(chain, number-1, header.ParentHash, nil)
if err != nil {
return err
}
if _, authorized := snap.Signers[signer]; !authorized {
return errUnauthorizedSigner
}
// If we're amongst the recent signers, wait for the next block
for seen, recent := range snap.Recents {
if recent == signer {
// Signer is among recents, only wait if the current block doesn't shift it out
if limit := uint64(len(snap.Signers)/2 + 1); number < limit || seen > number-limit {
log.Info("Signed recently, must wait for others")
return nil
}
}
}
// Sweet, the protocol permits us to sign the block, wait for our time
delay := time.Unix(header.Time.Int64(), 0).Sub(time.Now()) // nolint: gosimple
if header.Difficulty.Cmp(diffNoTurn) == 0 {
// It's not our turn explicitly to sign, delay it a bit
wiggle := time.Duration(len(snap.Signers)/2+1) * wiggleTime
delay += time.Duration(rand.Int63n(int64(wiggle)))
log.Trace("Out-of-turn signing requested", "wiggle", common.PrettyDuration(wiggle))
}
// Sign all the things!
sighash, err := signFn(accounts.Account{Address: signer}, accounts.MimetypeClique, CliqueRLP(header))
if err != nil {
return err
}
copy(header.Extra[len(header.Extra)-extraSeal:], sighash)
// Wait until sealing is terminated or delay timeout.
log.Trace("Waiting for slot to sign and propagate", "delay", common.PrettyDuration(delay))
go func() {
select {
case <-stop:
return
case <-time.After(delay):
}
select {
case results <- block.WithSeal(header):
default:
log.Warn("Sealing result is not read by miner", "sealhash", SealHash(header))
}
}()
return nil
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func (c *Clique) CalcDifficulty(chain consensus.ChainReader, time uint64, parent *types.Header) *big.Int {
snap, err := c.snapshot(chain, parent.Number.Uint64(), parent.Hash(), nil)
if err != nil {
return nil
}
return CalcDifficulty(snap, c.signer)
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have based on the previous blocks in the chain and the
// current signer.
func CalcDifficulty(snap *Snapshot, signer common.Address) *big.Int {
if snap.inturn(snap.Number+1, signer) {
return new(big.Int).Set(diffInTurn)
}
return new(big.Int).Set(diffNoTurn)
}
// SealHash returns the hash of a block prior to it being sealed.
func (c *Clique) SealHash(header *types.Header) common.Hash {
return SealHash(header)
}
// Close implements consensus.Engine. It's a noop for clique as there are no background threads.
func (c *Clique) Close() error {
return nil
}
// APIs implements consensus.Engine, returning the user facing RPC API to allow
// controlling the signer voting.
func (c *Clique) APIs(chain consensus.ChainReader) []rpc.API {
return []rpc.API{{
Namespace: "clique",
Version: "1.0",
Service: &API{chain: chain, clique: c},
Public: false,
}}
}
// SealHash returns the hash of a block prior to it being sealed.
func SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
encodeSigHeader(hasher, header)
hasher.Sum(hash[:0])
return hash
}
// CliqueRLP returns the rlp bytes which need to be signed for the proof-of-authority
// sealing. The RLP to sign consists of the entire header apart from the 65 byte signature
// contained at the end of the extra data.
//
// Note, the method requires the extra data to be at least 65 bytes, otherwise it
// panics. This is done to avoid accidentally using both forms (signature present
// or not), which could be abused to produce different hashes for the same header.
func CliqueRLP(header *types.Header) []byte {
b := new(bytes.Buffer)
encodeSigHeader(b, header)
return b.Bytes()
}
func encodeSigHeader(w io.Writer, header *types.Header) {
err := rlp.Encode(w, []interface{}{
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra[:len(header.Extra)-65], // Yes, this will panic if extra is too short
header.MixDigest,
header.Nonce,
})
if err != nil {
panic("can't encode: " + err.Error())
}
}
| consensus/clique/clique.go | 0 | https://github.com/ethereum/go-ethereum/commit/c53c5e616f04ae8b041bfb64309cbc7f3e70303a | [
0.0006359854014590383,
0.00020694706472568214,
0.00016381015302613378,
0.00017079099779948592,
0.00008592059020884335
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (queue *TimedQueue) cleanup(tick <-chan time.Time) {\n",
"\tfor now := range tick {\n",
"\t\tnowSec := now.UTC().Unix()\n",
"\t\tfor {\n",
"\t\t\tqueue.access.RLock()\n",
"\t\t\tqueueLen := queue.queue.Len()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnowSec := now.Unix()\n"
],
"file_path": "common/collect/timed_queue.go",
"type": "replace",
"edit_start_line_idx": 74
} | // Package vmess contains protocol definition, io lib for VMess.
package protocol
import (
"crypto/aes"
"crypto/cipher"
"encoding/binary"
"hash/fnv"
"io"
"time"
"github.com/v2ray/v2ray-core/common/alloc"
v2io "github.com/v2ray/v2ray-core/common/io"
"github.com/v2ray/v2ray-core/common/log"
v2net "github.com/v2ray/v2ray-core/common/net"
"github.com/v2ray/v2ray-core/proxy"
"github.com/v2ray/v2ray-core/proxy/vmess/config"
"github.com/v2ray/v2ray-core/proxy/vmess/protocol/user"
"github.com/v2ray/v2ray-core/transport"
)
const (
addrTypeIPv4 = byte(0x01)
addrTypeIPv6 = byte(0x03)
addrTypeDomain = byte(0x02)
CmdTCP = byte(0x01)
CmdUDP = byte(0x02)
Version = byte(0x01)
blockSize = 16
)
// VMessRequest implements the request message of VMess protocol. It only contains the header of a
// request message. The data part will be handled by the connection handler directly, in favor of data
// streaming.
type VMessRequest struct {
Version byte
UserId config.ID
RequestIV []byte
RequestKey []byte
ResponseHeader []byte
Command byte
Address v2net.Address
}
// Destination is the final destination of this request.
func (request *VMessRequest) Destination() v2net.Destination {
if request.Command == CmdTCP {
return v2net.NewTCPDestination(request.Address)
} else {
return v2net.NewUDPDestination(request.Address)
}
}
// VMessRequestReader is a parser to read VMessRequest from a byte stream.
type VMessRequestReader struct {
vUserSet user.UserSet
}
// NewVMessRequestReader creates a new VMessRequestReader with a given UserSet
func NewVMessRequestReader(vUserSet user.UserSet) *VMessRequestReader {
return &VMessRequestReader{
vUserSet: vUserSet,
}
}
// Read reads a VMessRequest from a byte stream.
func (r *VMessRequestReader) Read(reader io.Reader) (*VMessRequest, error) {
buffer := alloc.NewSmallBuffer()
nBytes, err := v2net.ReadAllBytes(reader, buffer.Value[:config.IDBytesLen])
if err != nil {
return nil, err
}
userId, timeSec, valid := r.vUserSet.GetUser(buffer.Value[:nBytes])
if !valid {
return nil, proxy.InvalidAuthentication
}
aesCipher, err := aes.NewCipher(userId.CmdKey())
if err != nil {
return nil, err
}
aesStream := cipher.NewCFBDecrypter(aesCipher, user.Int64Hash(timeSec))
decryptor := v2io.NewCryptionReader(aesStream, reader)
if err != nil {
return nil, err
}
nBytes, err = v2net.ReadAllBytes(decryptor, buffer.Value[:41])
if err != nil {
return nil, err
}
bufferLen := nBytes
request := &VMessRequest{
UserId: *userId,
Version: buffer.Value[0],
}
if request.Version != Version {
log.Warning("Invalid protocol version %d", request.Version)
return nil, proxy.InvalidProtocolVersion
}
request.RequestIV = buffer.Value[1:17] // 16 bytes
request.RequestKey = buffer.Value[17:33] // 16 bytes
request.ResponseHeader = buffer.Value[33:37] // 4 bytes
request.Command = buffer.Value[37]
port := binary.BigEndian.Uint16(buffer.Value[38:40])
switch buffer.Value[40] {
case addrTypeIPv4:
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:45]) // 4 bytes
bufferLen += 4
if err != nil {
return nil, err
}
request.Address = v2net.IPAddress(buffer.Value[41:45], port)
case addrTypeIPv6:
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:57]) // 16 bytes
bufferLen += 16
if err != nil {
return nil, err
}
request.Address = v2net.IPAddress(buffer.Value[41:57], port)
case addrTypeDomain:
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:42])
if err != nil {
return nil, err
}
domainLength := int(buffer.Value[41])
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[42:42+domainLength])
if err != nil {
return nil, err
}
bufferLen += 1 + domainLength
request.Address = v2net.DomainAddress(string(buffer.Value[42:42+domainLength]), port)
}
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[bufferLen:bufferLen+4])
if err != nil {
return nil, err
}
fnv1a := fnv.New32a()
fnv1a.Write(buffer.Value[:bufferLen])
actualHash := fnv1a.Sum32()
expectedHash := binary.BigEndian.Uint32(buffer.Value[bufferLen : bufferLen+4])
if actualHash != expectedHash {
return nil, transport.CorruptedPacket
}
return request, nil
}
// ToBytes returns a VMessRequest in the form of byte array.
func (request *VMessRequest) ToBytes(idHash user.CounterHash, randomRangeInt64 user.RandomInt64InRange, buffer *alloc.Buffer) (*alloc.Buffer, error) {
if buffer == nil {
buffer = alloc.NewSmallBuffer().Clear()
}
counter := randomRangeInt64(time.Now().UTC().Unix(), 30)
hash := idHash.Hash(request.UserId.Bytes[:], counter)
buffer.Append(hash)
encryptionBegin := buffer.Len()
buffer.AppendBytes(request.Version)
buffer.Append(request.RequestIV)
buffer.Append(request.RequestKey)
buffer.Append(request.ResponseHeader)
buffer.AppendBytes(request.Command)
buffer.Append(request.Address.PortBytes())
switch {
case request.Address.IsIPv4():
buffer.AppendBytes(addrTypeIPv4)
buffer.Append(request.Address.IP())
case request.Address.IsIPv6():
buffer.AppendBytes(addrTypeIPv6)
buffer.Append(request.Address.IP())
case request.Address.IsDomain():
buffer.AppendBytes(addrTypeDomain, byte(len(request.Address.Domain())))
buffer.Append([]byte(request.Address.Domain()))
}
encryptionEnd := buffer.Len()
fnv1a := fnv.New32a()
fnv1a.Write(buffer.Value[encryptionBegin:encryptionEnd])
fnvHash := fnv1a.Sum32()
buffer.AppendBytes(byte(fnvHash>>24), byte(fnvHash>>16), byte(fnvHash>>8), byte(fnvHash))
encryptionEnd += 4
aesCipher, err := aes.NewCipher(request.UserId.CmdKey())
if err != nil {
return nil, err
}
aesStream := cipher.NewCFBEncrypter(aesCipher, user.Int64Hash(counter))
aesStream.XORKeyStream(buffer.Value[encryptionBegin:encryptionEnd], buffer.Value[encryptionBegin:encryptionEnd])
return buffer, nil
}
| proxy/vmess/protocol/vmess.go | 1 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.0010304334573447704,
0.00021732127061113715,
0.00016542673984076828,
0.0001704408205114305,
0.00017901942192111164
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (queue *TimedQueue) cleanup(tick <-chan time.Time) {\n",
"\tfor now := range tick {\n",
"\t\tnowSec := now.UTC().Unix()\n",
"\t\tfor {\n",
"\t\t\tqueue.access.RLock()\n",
"\t\t\tqueueLen := queue.queue.Len()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnowSec := now.Unix()\n"
],
"file_path": "common/collect/timed_queue.go",
"type": "replace",
"edit_start_line_idx": 74
} | The MIT License (MIT)
Copyright (c) 2015 V2Ray
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| LICENSE | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00017663782637100667,
0.00017247488722205162,
0.00016922492068260908,
0.00017156188550870866,
0.000003094399062320008
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (queue *TimedQueue) cleanup(tick <-chan time.Time) {\n",
"\tfor now := range tick {\n",
"\t\tnowSec := now.UTC().Unix()\n",
"\t\tfor {\n",
"\t\t\tqueue.access.RLock()\n",
"\t\t\tqueueLen := queue.queue.Len()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnowSec := now.Unix()\n"
],
"file_path": "common/collect/timed_queue.go",
"type": "replace",
"edit_start_line_idx": 74
} | package io
import (
"crypto/cipher"
"io"
)
// CryptionReader is a general purpose reader that applies a stream cipher on top of a regular reader.
type CryptionReader struct {
stream cipher.Stream
reader io.Reader
}
// NewCryptionReader creates a new CryptionReader instance from given stream cipher and reader.
func NewCryptionReader(stream cipher.Stream, reader io.Reader) *CryptionReader {
return &CryptionReader{
stream: stream,
reader: reader,
}
}
// Read reads blocks from underlying reader, and crypt it. The content of blocks is modified in place.
func (reader CryptionReader) Read(blocks []byte) (int, error) {
nBytes, err := reader.reader.Read(blocks)
if nBytes > 0 {
reader.stream.XORKeyStream(blocks[:nBytes], blocks[:nBytes])
}
return nBytes, err
}
// CryptionWriter is a general-purpose byte stream writer that applies a stream cipher on top of a regular writer.
type CryptionWriter struct {
stream cipher.Stream
writer io.Writer
}
// NewCryptionWriter creates a new CryptionWriter from given stream cipher and writer.
func NewCryptionWriter(stream cipher.Stream, writer io.Writer) *CryptionWriter {
return &CryptionWriter{
stream: stream,
writer: writer,
}
}
// Crypt crypts the content of blocks without writing them into the underlying writer.
func (writer CryptionWriter) Crypt(blocks []byte) {
writer.stream.XORKeyStream(blocks, blocks)
}
// Write crypts the content of blocks in place, and then writes the given blocks to the underlying writer.
func (writer CryptionWriter) Write(blocks []byte) (int, error) {
writer.Crypt(blocks)
return writer.writer.Write(blocks)
}
| common/io/encryption.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00019721323042176664,
0.00017758172180037946,
0.00016749791393522173,
0.00017384116654284298,
0.000010723380000854377
] |
{
"id": 0,
"code_window": [
"}\n",
"\n",
"func (queue *TimedQueue) cleanup(tick <-chan time.Time) {\n",
"\tfor now := range tick {\n",
"\t\tnowSec := now.UTC().Unix()\n",
"\t\tfor {\n",
"\t\t\tqueue.access.RLock()\n",
"\t\t\tqueueLen := queue.queue.Len()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\tnowSec := now.Unix()\n"
],
"file_path": "common/collect/timed_queue.go",
"type": "replace",
"edit_start_line_idx": 74
} | package wildcard_router
import (
"github.com/v2ray/v2ray-core/app/router"
v2net "github.com/v2ray/v2ray-core/common/net"
"github.com/v2ray/v2ray-core/config"
)
type WildcardRouter struct {
}
func (router *WildcardRouter) TakeDetour(packet v2net.Packet) (config.ConnectionTag, error) {
return "", nil
}
type WildcardRouterFactory struct {
}
func (factory *WildcardRouterFactory) Create(rawConfig interface{}) (router.Router, error) {
return &WildcardRouter{}, nil
}
func init() {
router.RegisterRouter("wildcard", &WildcardRouterFactory{})
}
| app/router/wildcard_router/router.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.0001815447467379272,
0.00017813622253015637,
0.00017317272431682795,
0.00017969118198379874,
0.0000035903690331906546
] |
{
"id": 1,
"code_window": [
"\tassert := unit.Assert(t)\n",
"\n",
"\tremoved := make(map[string]bool)\n",
"\n",
"\tnowSec := time.Now().UTC().Unix()\n",
"\tq := NewTimedQueue(2)\n",
"\n",
"\tgo func() {\n",
"\t\tfor {\n",
"\t\t\tentry := <-q.RemovedEntries()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnowSec := time.Now().Unix()\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 14
} | package collect
import (
"container/heap"
"sync"
"time"
)
type timedQueueEntry struct {
timeSec int64
value interface{}
}
type timedQueueImpl []*timedQueueEntry
func (queue timedQueueImpl) Len() int {
return len(queue)
}
func (queue timedQueueImpl) Less(i, j int) bool {
return queue[i].timeSec < queue[j].timeSec
}
func (queue timedQueueImpl) Swap(i, j int) {
tmp := queue[i]
queue[i] = queue[j]
queue[j] = tmp
}
func (queue *timedQueueImpl) Push(value interface{}) {
entry := value.(*timedQueueEntry)
*queue = append(*queue, entry)
}
func (queue *timedQueueImpl) Pop() interface{} {
old := *queue
n := len(old)
v := old[n-1]
old[n-1] = nil
*queue = old[:n-1]
return v
}
type TimedQueue struct {
queue timedQueueImpl
access sync.RWMutex
removed chan interface{}
}
func NewTimedQueue(updateInterval int) *TimedQueue {
queue := &TimedQueue{
queue: make([]*timedQueueEntry, 0, 256),
removed: make(chan interface{}, 16),
access: sync.RWMutex{},
}
go queue.cleanup(time.Tick(time.Duration(updateInterval) * time.Second))
return queue
}
func (queue *TimedQueue) Add(value interface{}, time2Remove int64) {
queue.access.Lock()
heap.Push(&queue.queue, &timedQueueEntry{
timeSec: time2Remove,
value: value,
})
queue.access.Unlock()
}
func (queue *TimedQueue) RemovedEntries() <-chan interface{} {
return queue.removed
}
func (queue *TimedQueue) cleanup(tick <-chan time.Time) {
for now := range tick {
nowSec := now.UTC().Unix()
for {
queue.access.RLock()
queueLen := queue.queue.Len()
queue.access.RUnlock()
if queueLen == 0 {
break
}
queue.access.RLock()
entry := queue.queue[0]
queue.access.RUnlock()
if entry.timeSec > nowSec {
break
}
queue.access.Lock()
heap.Pop(&queue.queue)
queue.access.Unlock()
queue.removed <- entry.value
}
}
}
| common/collect/timed_queue.go | 1 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.9980765581130981,
0.2033812552690506,
0.00017052738985512406,
0.0016548354178667068,
0.39176008105278015
] |
{
"id": 1,
"code_window": [
"\tassert := unit.Assert(t)\n",
"\n",
"\tremoved := make(map[string]bool)\n",
"\n",
"\tnowSec := time.Now().UTC().Unix()\n",
"\tq := NewTimedQueue(2)\n",
"\n",
"\tgo func() {\n",
"\t\tfor {\n",
"\t\t\tentry := <-q.RemovedEntries()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnowSec := time.Now().Unix()\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 14
} | package net
import (
"bytes"
"crypto/rand"
"io"
"io/ioutil"
"testing"
"github.com/v2ray/v2ray-core/common/alloc"
"github.com/v2ray/v2ray-core/testing/unit"
)
func TestReaderAndWrite(t *testing.T) {
assert := unit.Assert(t)
size := 1024 * 1024
buffer := make([]byte, size)
nBytes, err := rand.Read(buffer)
assert.Int(nBytes).Equals(len(buffer))
assert.Error(err).IsNil()
readerBuffer := bytes.NewReader(buffer)
writerBuffer := bytes.NewBuffer(make([]byte, 0, size))
transportChan := make(chan *alloc.Buffer, 1024)
err = ReaderToChan(transportChan, readerBuffer)
assert.Error(err).Equals(io.EOF)
close(transportChan)
err = ChanToWriter(writerBuffer, transportChan)
assert.Error(err).IsNil()
assert.Bytes(buffer).Equals(writerBuffer.Bytes())
}
type StaticReader struct {
total int
current int
}
func (reader *StaticReader) Read(b []byte) (size int, err error) {
size = len(b)
if size > reader.total-reader.current {
size = reader.total - reader.current
}
for i := 0; i < size; i++ {
b[i] = byte(i)
}
//rand.Read(b[:size])
reader.current += size
if reader.current == reader.total {
err = io.EOF
}
return
}
func BenchmarkTransport1K(b *testing.B) {
size := 1 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func BenchmarkTransport2K(b *testing.B) {
size := 2 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func BenchmarkTransport4K(b *testing.B) {
size := 4 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func BenchmarkTransport10K(b *testing.B) {
size := 10 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func BenchmarkTransport100K(b *testing.B) {
size := 100 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func BenchmarkTransport1M(b *testing.B) {
size := 1024 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func BenchmarkTransport10M(b *testing.B) {
size := 10 * 1024 * 1024
for i := 0; i < b.N; i++ {
runBenchmarkTransport(size)
}
}
func runBenchmarkTransport(size int) {
transportChanA := make(chan *alloc.Buffer, 16)
transportChanB := make(chan *alloc.Buffer, 16)
readerA := &StaticReader{size, 0}
readerB := &StaticReader{size, 0}
writerA := ioutil.Discard
writerB := ioutil.Discard
finishA := make(chan bool)
finishB := make(chan bool)
go func() {
ChanToWriter(writerA, transportChanA)
close(finishA)
}()
go func() {
ReaderToChan(transportChanA, readerA)
close(transportChanA)
}()
go func() {
ChanToWriter(writerB, transportChanB)
close(finishB)
}()
go func() {
ReaderToChan(transportChanB, readerB)
close(transportChanB)
}()
<-transportChanA
<-transportChanB
}
| common/net/transport_test.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00017770328850019723,
0.00017171799845527858,
0.0001668908225838095,
0.00017231091624125838,
0.000003261598521930864
] |
{
"id": 1,
"code_window": [
"\tassert := unit.Assert(t)\n",
"\n",
"\tremoved := make(map[string]bool)\n",
"\n",
"\tnowSec := time.Now().UTC().Unix()\n",
"\tq := NewTimedQueue(2)\n",
"\n",
"\tgo func() {\n",
"\t\tfor {\n",
"\t\t\tentry := <-q.RemovedEntries()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnowSec := time.Now().Unix()\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 14
} | package json
import (
"github.com/v2ray/v2ray-core/config"
"github.com/v2ray/v2ray-core/config/json"
vmessconfig "github.com/v2ray/v2ray-core/proxy/vmess/config"
)
type Inbound struct {
AllowedClients []*ConfigUser `json:"clients"`
UDP bool `json:"udp"`
}
func (c *Inbound) AllowedUsers() []vmessconfig.User {
users := make([]vmessconfig.User, 0, len(c.AllowedClients))
for _, rawUser := range c.AllowedClients {
users = append(users, rawUser)
}
return users
}
func (c *Inbound) UDPEnabled() bool {
return c.UDP
}
func init() {
json.RegisterConfigType("vmess", config.TypeInbound, func() interface{} {
return new(Inbound)
})
}
| proxy/vmess/config/json/inbound.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.0001769287628121674,
0.00016959107597358525,
0.00016543651872780174,
0.0001679995039012283,
0.000004368334884929936
] |
{
"id": 1,
"code_window": [
"\tassert := unit.Assert(t)\n",
"\n",
"\tremoved := make(map[string]bool)\n",
"\n",
"\tnowSec := time.Now().UTC().Unix()\n",
"\tq := NewTimedQueue(2)\n",
"\n",
"\tgo func() {\n",
"\t\tfor {\n",
"\t\t\tentry := <-q.RemovedEntries()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tnowSec := time.Now().Unix()\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 14
} | // +build !windows
package platform
import (
"os"
)
func ExpandEnv(s string) string {
return os.ExpandEnv(s)
}
func LineSeparator() string {
return "\n"
}
| common/platform/others.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00019661278929561377,
0.00018279333016835153,
0.00016897388559300452,
0.00018279333016835153,
0.00001381945185130462
] |
{
"id": 2,
"code_window": [
"\tremoved[\"Value2\"] = false\n",
"\n",
"\t<-tick\n",
"\tassert.Bool(removed[\"Values\"]).IsFalse()\n",
"\n",
"\tq.Add(\"Value1\", time.Now().UTC().Unix()+10)\n",
"\n",
"\t<-tick\n",
"\tv1, ok = removed[\"Value1\"]\n",
"\tassert.Bool(ok).IsTrue()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tq.Add(\"Value1\", time.Now().Unix()+10)\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 53
} | // Package vmess contains protocol definition, io lib for VMess.
package protocol
import (
"crypto/aes"
"crypto/cipher"
"encoding/binary"
"hash/fnv"
"io"
"time"
"github.com/v2ray/v2ray-core/common/alloc"
v2io "github.com/v2ray/v2ray-core/common/io"
"github.com/v2ray/v2ray-core/common/log"
v2net "github.com/v2ray/v2ray-core/common/net"
"github.com/v2ray/v2ray-core/proxy"
"github.com/v2ray/v2ray-core/proxy/vmess/config"
"github.com/v2ray/v2ray-core/proxy/vmess/protocol/user"
"github.com/v2ray/v2ray-core/transport"
)
const (
addrTypeIPv4 = byte(0x01)
addrTypeIPv6 = byte(0x03)
addrTypeDomain = byte(0x02)
CmdTCP = byte(0x01)
CmdUDP = byte(0x02)
Version = byte(0x01)
blockSize = 16
)
// VMessRequest implements the request message of VMess protocol. It only contains the header of a
// request message. The data part will be handled by the connection handler directly, in favor of data
// streaming.
type VMessRequest struct {
Version byte
UserId config.ID
RequestIV []byte
RequestKey []byte
ResponseHeader []byte
Command byte
Address v2net.Address
}
// Destination is the final destination of this request.
func (request *VMessRequest) Destination() v2net.Destination {
if request.Command == CmdTCP {
return v2net.NewTCPDestination(request.Address)
} else {
return v2net.NewUDPDestination(request.Address)
}
}
// VMessRequestReader is a parser to read VMessRequest from a byte stream.
type VMessRequestReader struct {
vUserSet user.UserSet
}
// NewVMessRequestReader creates a new VMessRequestReader with a given UserSet
func NewVMessRequestReader(vUserSet user.UserSet) *VMessRequestReader {
return &VMessRequestReader{
vUserSet: vUserSet,
}
}
// Read reads a VMessRequest from a byte stream.
func (r *VMessRequestReader) Read(reader io.Reader) (*VMessRequest, error) {
buffer := alloc.NewSmallBuffer()
nBytes, err := v2net.ReadAllBytes(reader, buffer.Value[:config.IDBytesLen])
if err != nil {
return nil, err
}
userId, timeSec, valid := r.vUserSet.GetUser(buffer.Value[:nBytes])
if !valid {
return nil, proxy.InvalidAuthentication
}
aesCipher, err := aes.NewCipher(userId.CmdKey())
if err != nil {
return nil, err
}
aesStream := cipher.NewCFBDecrypter(aesCipher, user.Int64Hash(timeSec))
decryptor := v2io.NewCryptionReader(aesStream, reader)
if err != nil {
return nil, err
}
nBytes, err = v2net.ReadAllBytes(decryptor, buffer.Value[:41])
if err != nil {
return nil, err
}
bufferLen := nBytes
request := &VMessRequest{
UserId: *userId,
Version: buffer.Value[0],
}
if request.Version != Version {
log.Warning("Invalid protocol version %d", request.Version)
return nil, proxy.InvalidProtocolVersion
}
request.RequestIV = buffer.Value[1:17] // 16 bytes
request.RequestKey = buffer.Value[17:33] // 16 bytes
request.ResponseHeader = buffer.Value[33:37] // 4 bytes
request.Command = buffer.Value[37]
port := binary.BigEndian.Uint16(buffer.Value[38:40])
switch buffer.Value[40] {
case addrTypeIPv4:
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:45]) // 4 bytes
bufferLen += 4
if err != nil {
return nil, err
}
request.Address = v2net.IPAddress(buffer.Value[41:45], port)
case addrTypeIPv6:
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:57]) // 16 bytes
bufferLen += 16
if err != nil {
return nil, err
}
request.Address = v2net.IPAddress(buffer.Value[41:57], port)
case addrTypeDomain:
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[41:42])
if err != nil {
return nil, err
}
domainLength := int(buffer.Value[41])
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[42:42+domainLength])
if err != nil {
return nil, err
}
bufferLen += 1 + domainLength
request.Address = v2net.DomainAddress(string(buffer.Value[42:42+domainLength]), port)
}
_, err = v2net.ReadAllBytes(decryptor, buffer.Value[bufferLen:bufferLen+4])
if err != nil {
return nil, err
}
fnv1a := fnv.New32a()
fnv1a.Write(buffer.Value[:bufferLen])
actualHash := fnv1a.Sum32()
expectedHash := binary.BigEndian.Uint32(buffer.Value[bufferLen : bufferLen+4])
if actualHash != expectedHash {
return nil, transport.CorruptedPacket
}
return request, nil
}
// ToBytes returns a VMessRequest in the form of byte array.
func (request *VMessRequest) ToBytes(idHash user.CounterHash, randomRangeInt64 user.RandomInt64InRange, buffer *alloc.Buffer) (*alloc.Buffer, error) {
if buffer == nil {
buffer = alloc.NewSmallBuffer().Clear()
}
counter := randomRangeInt64(time.Now().UTC().Unix(), 30)
hash := idHash.Hash(request.UserId.Bytes[:], counter)
buffer.Append(hash)
encryptionBegin := buffer.Len()
buffer.AppendBytes(request.Version)
buffer.Append(request.RequestIV)
buffer.Append(request.RequestKey)
buffer.Append(request.ResponseHeader)
buffer.AppendBytes(request.Command)
buffer.Append(request.Address.PortBytes())
switch {
case request.Address.IsIPv4():
buffer.AppendBytes(addrTypeIPv4)
buffer.Append(request.Address.IP())
case request.Address.IsIPv6():
buffer.AppendBytes(addrTypeIPv6)
buffer.Append(request.Address.IP())
case request.Address.IsDomain():
buffer.AppendBytes(addrTypeDomain, byte(len(request.Address.Domain())))
buffer.Append([]byte(request.Address.Domain()))
}
encryptionEnd := buffer.Len()
fnv1a := fnv.New32a()
fnv1a.Write(buffer.Value[encryptionBegin:encryptionEnd])
fnvHash := fnv1a.Sum32()
buffer.AppendBytes(byte(fnvHash>>24), byte(fnvHash>>16), byte(fnvHash>>8), byte(fnvHash))
encryptionEnd += 4
aesCipher, err := aes.NewCipher(request.UserId.CmdKey())
if err != nil {
return nil, err
}
aesStream := cipher.NewCFBEncrypter(aesCipher, user.Int64Hash(counter))
aesStream.XORKeyStream(buffer.Value[encryptionBegin:encryptionEnd], buffer.Value[encryptionBegin:encryptionEnd])
return buffer, nil
}
| proxy/vmess/protocol/vmess.go | 1 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00018599187023937702,
0.00017331945127807558,
0.00016425935609731823,
0.0001735182013362646,
0.000004656760665966431
] |
{
"id": 2,
"code_window": [
"\tremoved[\"Value2\"] = false\n",
"\n",
"\t<-tick\n",
"\tassert.Bool(removed[\"Values\"]).IsFalse()\n",
"\n",
"\tq.Add(\"Value1\", time.Now().UTC().Unix()+10)\n",
"\n",
"\t<-tick\n",
"\tv1, ok = removed[\"Value1\"]\n",
"\tassert.Bool(ok).IsTrue()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tq.Add(\"Value1\", time.Now().Unix()+10)\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 53
} | package protocol
import (
"encoding/binary"
"errors"
"github.com/v2ray/v2ray-core/common/alloc"
"github.com/v2ray/v2ray-core/common/log"
v2net "github.com/v2ray/v2ray-core/common/net"
)
var (
ErrorUnknownAddressType = errors.New("Unknown Address Type.")
)
type Socks5UDPRequest struct {
Fragment byte
Address v2net.Address
Data *alloc.Buffer
}
func (request *Socks5UDPRequest) Destination() v2net.Destination {
return v2net.NewUDPDestination(request.Address)
}
func (request *Socks5UDPRequest) Write(buffer *alloc.Buffer) {
buffer.AppendBytes(0, 0, request.Fragment)
switch {
case request.Address.IsIPv4():
buffer.AppendBytes(AddrTypeIPv4).Append(request.Address.IP())
case request.Address.IsIPv6():
buffer.AppendBytes(AddrTypeIPv6).Append(request.Address.IP())
case request.Address.IsDomain():
buffer.AppendBytes(AddrTypeDomain, byte(len(request.Address.Domain()))).Append([]byte(request.Address.Domain()))
}
buffer.Append(request.Address.PortBytes())
buffer.Append(request.Data.Value)
}
func ReadUDPRequest(packet []byte) (request Socks5UDPRequest, err error) {
// packet[0] and packet[1] are reserved
request.Fragment = packet[2]
addrType := packet[3]
var dataBegin int
switch addrType {
case AddrTypeIPv4:
ip := packet[4:8]
port := binary.BigEndian.Uint16(packet[8:10])
request.Address = v2net.IPAddress(ip, port)
dataBegin = 10
case AddrTypeIPv6:
ip := packet[4:20]
port := binary.BigEndian.Uint16(packet[20:22])
request.Address = v2net.IPAddress(ip, port)
dataBegin = 22
case AddrTypeDomain:
domainLength := int(packet[4])
domain := string(packet[5 : 5+domainLength])
port := binary.BigEndian.Uint16(packet[5+domainLength : 5+domainLength+2])
request.Address = v2net.DomainAddress(domain, port)
dataBegin = 5 + domainLength + 2
default:
log.Warning("Unknown address type %d", addrType)
err = ErrorUnknownAddressType
return
}
request.Data = alloc.NewBuffer().Clear().Append(packet[dataBegin:])
return
}
| proxy/socks/protocol/udp.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00019011744007002562,
0.00017467213911004364,
0.000164565077284351,
0.00017372608999721706,
0.000006993699571467005
] |
{
"id": 2,
"code_window": [
"\tremoved[\"Value2\"] = false\n",
"\n",
"\t<-tick\n",
"\tassert.Bool(removed[\"Values\"]).IsFalse()\n",
"\n",
"\tq.Add(\"Value1\", time.Now().UTC().Unix()+10)\n",
"\n",
"\t<-tick\n",
"\tv1, ok = removed[\"Value1\"]\n",
"\tassert.Bool(ok).IsTrue()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tq.Add(\"Value1\", time.Now().Unix()+10)\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 53
} | package socks
import (
"github.com/v2ray/v2ray-core/app"
"github.com/v2ray/v2ray-core/proxy"
"github.com/v2ray/v2ray-core/proxy/socks/config/json"
)
type SocksServerFactory struct {
}
func (factory SocksServerFactory) Create(dispatcher app.PacketDispatcher, rawConfig interface{}) (proxy.InboundConnectionHandler, error) {
config := rawConfig.(*json.SocksConfig)
config.Initialize()
return NewSocksServer(dispatcher, config), nil
}
func init() {
proxy.RegisterInboundConnectionHandlerFactory("socks", SocksServerFactory{})
}
| proxy/socks/socksfactory.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00017724705685395747,
0.00017524539725854993,
0.0001733851822791621,
0.00017510393809061497,
0.0000015797733112776768
] |
{
"id": 2,
"code_window": [
"\tremoved[\"Value2\"] = false\n",
"\n",
"\t<-tick\n",
"\tassert.Bool(removed[\"Values\"]).IsFalse()\n",
"\n",
"\tq.Add(\"Value1\", time.Now().UTC().Unix()+10)\n",
"\n",
"\t<-tick\n",
"\tv1, ok = removed[\"Value1\"]\n",
"\tassert.Bool(ok).IsTrue()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tq.Add(\"Value1\", time.Now().Unix()+10)\n"
],
"file_path": "common/collect/timed_queue_test.go",
"type": "replace",
"edit_start_line_idx": 53
} | {
"port": 1080,
"log": {
"access": ""
},
"inbound": {
"protocol": "socks",
"settings": {
"auth": "noauth",
"udp": false,
"ip": "127.0.0.1"
}
},
"outbound": {
"protocol": "vmess",
"settings": {
"vnext": [
{
"address": "127.0.0.1",
"port": 27183,
"users": [
{"id": "ad937d9d-6e23-4a5a-ba23-bce5092a7c51"}
],
"network": "tcp"
}
]
}
}
}
| release/config/vpoint_socks_vmess.json | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00017642791499383748,
0.00017511907208245248,
0.00017299958562944084,
0.00017592967196833342,
0.0000015124334140637075
] |
{
"id": 3,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) {\n",
"\tlastSec := time.Now().UTC().Unix() - cacheDurationSec\n",
"\n",
"\tfor now := range tick {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tlastSec := time.Now().Unix() - cacheDurationSec\n"
],
"file_path": "proxy/vmess/protocol/user/userset.go",
"type": "replace",
"edit_start_line_idx": 65
} | package user
import (
"sync"
"time"
"github.com/v2ray/v2ray-core/common/collect"
"github.com/v2ray/v2ray-core/proxy/vmess/config"
)
const (
updateIntervalSec = 10
cacheDurationSec = 120
)
type UserSet interface {
AddUser(user config.User) error
GetUser(timeHash []byte) (*config.ID, int64, bool)
}
type TimedUserSet struct {
validUserIds []*config.ID
userHash map[string]indexTimePair
userHashDeleteQueue *collect.TimedQueue
access sync.RWMutex
}
type indexTimePair struct {
index int
timeSec int64
}
func NewTimedUserSet() UserSet {
tus := &TimedUserSet{
validUserIds: make([]*config.ID, 0, 16),
userHash: make(map[string]indexTimePair, 512),
userHashDeleteQueue: collect.NewTimedQueue(updateIntervalSec),
access: sync.RWMutex{},
}
go tus.updateUserHash(time.Tick(updateIntervalSec * time.Second))
go tus.removeEntries(tus.userHashDeleteQueue.RemovedEntries())
return tus
}
func (us *TimedUserSet) removeEntries(entries <-chan interface{}) {
for entry := range entries {
us.access.Lock()
delete(us.userHash, entry.(string))
us.access.Unlock()
}
}
func (us *TimedUserSet) generateNewHashes(lastSec, nowSec int64, idx int, id *config.ID) {
idHash := NewTimeHash(HMACHash{})
for lastSec < nowSec {
idHash := idHash.Hash(id.Bytes[:], lastSec)
us.access.Lock()
us.userHash[string(idHash)] = indexTimePair{idx, lastSec}
us.access.Unlock()
us.userHashDeleteQueue.Add(string(idHash), lastSec+2*cacheDurationSec)
lastSec++
}
}
func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) {
lastSec := time.Now().UTC().Unix() - cacheDurationSec
for now := range tick {
nowSec := now.UTC().Unix() + cacheDurationSec
for idx, id := range us.validUserIds {
us.generateNewHashes(lastSec, nowSec, idx, id)
}
lastSec = nowSec
}
}
func (us *TimedUserSet) AddUser(user config.User) error {
id := user.ID()
idx := len(us.validUserIds)
us.validUserIds = append(us.validUserIds, id)
nowSec := time.Now().UTC().Unix()
lastSec := nowSec - cacheDurationSec
us.generateNewHashes(lastSec, nowSec+cacheDurationSec, idx, id)
return nil
}
func (us TimedUserSet) GetUser(userHash []byte) (*config.ID, int64, bool) {
defer us.access.RUnlock()
us.access.RLock()
pair, found := us.userHash[string(userHash)]
if found {
return us.validUserIds[pair.index], pair.timeSec, true
}
return nil, 0, false
}
| proxy/vmess/protocol/user/userset.go | 1 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.9989494681358337,
0.7371603846549988,
0.0001891228457679972,
0.9973317384719849,
0.4014653265476227
] |
{
"id": 3,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) {\n",
"\tlastSec := time.Now().UTC().Unix() - cacheDurationSec\n",
"\n",
"\tfor now := range tick {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tlastSec := time.Now().Unix() - cacheDurationSec\n"
],
"file_path": "proxy/vmess/protocol/user/userset.go",
"type": "replace",
"edit_start_line_idx": 65
} | # V2Ray Installation

## Pre-built Binaries

Published under [Release](https://github.com/v2ray/v2ray-core/releases) and updated weekly; [see here for the release cycle](https://github.com/V2Ray/v2ray-core/blob/master/spec/roadmap.md).

Specifically:

* v2ray-linux-32.zip: for 32-bit Linux, any distribution.
* v2ray-linux-64.zip: for 64-bit Linux, any distribution.
* v2ray-linux-arm.zip: for Linux on ARMv6 and later platforms, such as Raspberry Pi.
* v2ray-linux-arm64.zip: for Linux on ARMv8 and later platforms.
* v2ray-linux-macos.zip: for Mac OS X 10.7 and later.
* v2ray-windows-32.zip: for 32-bit Windows, Vista and later.
* v2ray-windows-64.zip: for 64-bit Windows, Vista and later.

## Building from Source

A rough outline; adjust to your own environment as needed.

1. Install Git: sudo apt-get install git -y
2. Install golang:
    1. Download the installer archive:
        1. 64-bit: curl -o go_latest.tar.gz https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
        2. 32-bit: curl -o go_latest.tar.gz https://storage.googleapis.com/golang/go1.5.1.linux-386.tar.gz
    2. sudo tar -C /usr/local -xzf go_latest.tar.gz
    3. export PATH=$PATH:/usr/local/go/bin
    4. export GOPATH=$HOME/work
3. Fetch the V2Ray sources: go get -u github.com/v2ray/v2ray-core
4. Build the build tool: go install github.com/v2ray/v2ray-core/tools/build
5. Build V2Ray: $GOPATH/bin/build
6. The V2Ray binary and configuration files are placed under $GOPATH/bin/v2ray-XXX (XXX varies by platform).

### Arch Linux

1. Install Git: sudo pacman -S git
2. Install golang: sudo pacman -S go
    1. export GOPATH=$HOME/work
3. go get -u github.com/v2ray/v2ray-core
4. go install github.com/v2ray/v2ray-core/tools/build
5. $GOPATH/bin/build

### Debian / Ubuntu

bash <(curl -s https://raw.githubusercontent.com/v2ray/v2ray-core/master/release/install.sh)

This script automatically installs git and golang 1.5 if they are not already on the system (root privileges are required), then builds v2ray to $GOPATH/bin/v2ray. A freshly installed golang has GOPATH set to /v2ray.

## Configuration and Running

[Link](https://github.com/V2Ray/v2ray-core/blob/master/spec/guide.md)
| spec/install.md | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.0001686109317233786,
0.00016686064191162586,
0.00016481144120916724,
0.0001672709477134049,
0.0000013095972235532827
] |
{
"id": 3,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) {\n",
"\tlastSec := time.Now().UTC().Unix() - cacheDurationSec\n",
"\n",
"\tfor now := range tick {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tlastSec := time.Now().Unix() - cacheDurationSec\n"
],
"file_path": "proxy/vmess/protocol/user/userset.go",
"type": "replace",
"edit_start_line_idx": 65
} | package json
import (
"encoding/json"
"github.com/v2ray/v2ray-core/proxy/vmess/config"
)
// ConfigUser is an user account in VMess configuration.
type ConfigUser struct {
Id *config.ID
Email string
}
func (u *ConfigUser) UnmarshalJSON(data []byte) error {
type rawUser struct {
IdString string `json:"id"`
EmailString string `json:"email"`
}
var rawUserValue rawUser
if err := json.Unmarshal(data, &rawUserValue); err != nil {
return err
}
id, err := config.NewID(rawUserValue.IdString)
if err != nil {
return err
}
u.Id = id
u.Email = rawUserValue.EmailString
return nil
}
func (u *ConfigUser) ID() *config.ID {
return u.Id
}
| proxy/vmess/config/json/user.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.0034342731814831495,
0.001141026965342462,
0.00017711645341478288,
0.0004763590404763818,
0.0013314918614923954
] |
{
"id": 3,
"code_window": [
"\t}\n",
"}\n",
"\n",
"func (us *TimedUserSet) updateUserHash(tick <-chan time.Time) {\n",
"\tlastSec := time.Now().UTC().Unix() - cacheDurationSec\n",
"\n",
"\tfor now := range tick {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tlastSec := time.Now().Unix() - cacheDurationSec\n"
],
"file_path": "proxy/vmess/protocol/user/userset.go",
"type": "replace",
"edit_start_line_idx": 65
} | package socks
import (
"net"
"github.com/v2ray/v2ray-core/common/alloc"
"github.com/v2ray/v2ray-core/common/log"
v2net "github.com/v2ray/v2ray-core/common/net"
"github.com/v2ray/v2ray-core/proxy/socks/protocol"
)
var udpAddress v2net.Address
func (server *SocksServer) ListenUDP(port uint16) error {
addr := &net.UDPAddr{
IP: net.IP{0, 0, 0, 0},
Port: int(port),
Zone: "",
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
log.Error("Socks failed to listen UDP on port %d: %v", port, err)
return err
}
udpAddress = v2net.IPAddress(server.config.IP(), port)
go server.AcceptPackets(conn)
return nil
}
func (server *SocksServer) getUDPAddr() v2net.Address {
return udpAddress
}
func (server *SocksServer) AcceptPackets(conn *net.UDPConn) error {
for {
buffer := alloc.NewBuffer()
nBytes, addr, err := conn.ReadFromUDP(buffer.Value)
if err != nil {
log.Error("Socks failed to read UDP packets: %v", err)
buffer.Release()
continue
}
log.Info("Client UDP connection from %v", addr)
request, err := protocol.ReadUDPRequest(buffer.Value[:nBytes])
buffer.Release()
if err != nil {
log.Error("Socks failed to parse UDP request: %v", err)
request.Data.Release()
continue
}
if request.Fragment != 0 {
log.Warning("Dropping fragmented UDP packets.")
// TODO handle fragments
request.Data.Release()
continue
}
udpPacket := v2net.NewPacket(request.Destination(), request.Data, false)
log.Info("Send packet to %s with %d bytes", udpPacket.Destination().String(), request.Data.Len())
go server.handlePacket(conn, udpPacket, addr, request.Address)
}
}
func (server *SocksServer) handlePacket(conn *net.UDPConn, packet v2net.Packet, clientAddr *net.UDPAddr, targetAddr v2net.Address) {
ray := server.dispatcher.DispatchToOutbound(packet)
close(ray.InboundInput())
for data := range ray.InboundOutput() {
response := &protocol.Socks5UDPRequest{
Fragment: 0,
Address: targetAddr,
Data: data,
}
log.Info("Writing back UDP response with %d bytes from %s to %s", data.Len(), targetAddr.String(), clientAddr.String())
udpMessage := alloc.NewSmallBuffer().Clear()
response.Write(udpMessage)
nBytes, err := conn.WriteToUDP(udpMessage.Value, clientAddr)
udpMessage.Release()
response.Data.Release()
if err != nil {
log.Error("Socks failed to write UDP message (%d bytes) to %s: %v", nBytes, clientAddr.String(), err)
}
}
}
| proxy/socks/udp.go | 0 | https://github.com/v2ray/v2ray-core/commit/fdb41bbd50444b702940b117efee5216a4f4756a | [
0.00018902169540524483,
0.0001710852957330644,
0.00016479009354952723,
0.00016758800484240055,
0.000007437259682774311
] |