Dataset columns:
  hunk              dict
  file              string (length 0 to 11.8M)
  file_path         string (length 2 to 234)
  label             int64 (0 or 1)
  commit_url        string (length 74 to 103)
  dependency_score  sequence of 5 floats
{ "id": 7, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *AdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n", "\t\t\t\tlog.Warnf(\"ads received error: %v\", err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/adstest.go", "type": "replace", "edit_start_line_idx": 99 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package xds import ( "context" "sync" "time" core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3" sds "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3" "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "istio.io/istio/pilot/pkg/model" v3 "istio.io/istio/pilot/pkg/xds/v3" "istio.io/istio/pkg/test" ) func NewAdsTest(t test.Failer, conn *grpc.ClientConn) *AdsTest { return NewXdsTest(t, conn, func(conn *grpc.ClientConn) (DiscoveryClient, error) { xds := discovery.NewAggregatedDiscoveryServiceClient(conn) return xds.StreamAggregatedResources(context.Background()) }) } func NewSdsTest(t test.Failer, conn *grpc.ClientConn) *AdsTest { return NewXdsTest(t, conn, func(conn *grpc.ClientConn) (DiscoveryClient, error) { xds := sds.NewSecretDiscoveryServiceClient(conn) return xds.StreamSecrets(context.Background()) }).WithType(v3.SecretType) } func NewXdsTest(t test.Failer, conn *grpc.ClientConn, getClient func(conn *grpc.ClientConn) (DiscoveryClient, error)) *AdsTest { ctx, cancel := context.WithCancel(context.Background()) cl, err := getClient(conn) if err != nil { t.Fatal(err) } resp := &AdsTest{ client: cl, conn: conn, context: ctx, cancelContext: cancel, ID: "sidecar~1.1.1.1~test.default~default.svc.cluster.local", timeout: time.Second, Type: v3.ClusterType, responses: make(chan *discovery.DiscoveryResponse), error: make(chan error), } t.Cleanup(resp.Cleanup) go resp.adsReceiveChannel() return resp } type AdsTest struct { client DiscoveryClient responses chan *discovery.DiscoveryResponse error chan error conn *grpc.ClientConn metadata model.NodeMetadata ID string Type string cancelOnce sync.Once context context.Context cancelContext context.CancelFunc timeout time.Duration } func (a *AdsTest) Cleanup() { // Place in once to avoid race when two callers attempt to cleanup a.cancelOnce.Do(func() { a.cancelContext() _ = a.client.CloseSend() if a.conn != nil { _ = a.conn.Close() } }) } func (a *AdsTest) adsReceiveChannel() { go func() { <-a.context.Done() a.Cleanup() }() for { resp, err := a.client.Recv() if err != nil { if isUnexpectedError(err) { log.Warnf("ads received error: %v", err) } select { case a.error <- err: case <-a.context.Done(): } return } select { case a.responses <- resp: case <-a.context.Done(): return } } } // DrainResponses reads all responses, but does nothing to them func (a *AdsTest) DrainResponses() { for { select { case <-a.context.Done(): return case r := <-a.responses: log.Infof("drained response %v", r.TypeUrl) } } } // ExpectResponse waits until a response is received and returns it func (a *AdsTest) ExpectResponse(t test.Failer) *discovery.DiscoveryResponse { t.Helper() select { case <-time.After(a.timeout): t.Fatalf("did not get response in time") case resp := <-a.responses: if resp == nil || len(resp.Resources) == 0 { t.Fatalf("got empty response") } return resp case err 
:= <-a.error: t.Fatalf("got error: %v", err) } return nil } // ExpectError waits until an error is received and returns it func (a *AdsTest) ExpectError(t test.Failer) error { t.Helper() select { case <-time.After(a.timeout): t.Fatalf("did not get error in time") case err := <-a.error: return err } return nil } // ExpectNoResponse waits a short period of time and ensures no response is received func (a *AdsTest) ExpectNoResponse(t test.Failer) { t.Helper() select { case <-time.After(time.Millisecond * 50): return case resp := <-a.responses: t.Fatalf("got unexpected response: %v", resp) } } func (a *AdsTest) fillInRequestDefaults(req *discovery.DiscoveryRequest) *discovery.DiscoveryRequest { if req == nil { req = &discovery.DiscoveryRequest{} } if req.TypeUrl == "" { req.TypeUrl = a.Type } if req.Node == nil { req.Node = &core.Node{ Id: a.ID, Metadata: a.metadata.ToStruct(), } } return req } func (a *AdsTest) Request(t test.Failer, req *discovery.DiscoveryRequest) { t.Helper() req = a.fillInRequestDefaults(req) if err := a.client.Send(req); err != nil { t.Fatal(err) } } // RequestResponseAck does a full XDS exchange: Send a request, get a response, and ACK the response func (a *AdsTest) RequestResponseAck(t test.Failer, req *discovery.DiscoveryRequest) *discovery.DiscoveryResponse { t.Helper() req = a.fillInRequestDefaults(req) a.Request(t, req) resp := a.ExpectResponse(t) req.ResponseNonce = resp.Nonce req.VersionInfo = resp.VersionInfo a.Request(t, req) return resp } // RequestResponseAck does a full XDS exchange with an error: Send a request, get a response, and NACK the response func (a *AdsTest) RequestResponseNack(t test.Failer, req *discovery.DiscoveryRequest) *discovery.DiscoveryResponse { t.Helper() if req == nil { req = &discovery.DiscoveryRequest{} } a.Request(t, req) resp := a.ExpectResponse(t) req.ResponseNonce = resp.Nonce req.ErrorDetail = &status.Status{Message: "Test request NACK"} a.Request(t, req) return resp } func (a *AdsTest) WithID(id string) *AdsTest { a.ID = id return a } func (a *AdsTest) WithType(typeURL string) *AdsTest { a.Type = typeURL return a } func (a *AdsTest) WithMetadata(m model.NodeMetadata) *AdsTest { a.metadata = m return a } func (a *AdsTest) WithTimeout(t time.Duration) *AdsTest { a.timeout = t return a }
pilot/pkg/xds/adstest.go
1
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.9984532594680786, 0.22124308347702026, 0.00017000250227283686, 0.012986806221306324, 0.37029385566711426 ]
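The repeated hunks (id 7 and 8) replace a hand-rolled cleanup goroutine, go func() { <-a.context.Done(); a.Cleanup() }(), with a single context.AfterFunc(a.context, a.Cleanup) call. The sketch below is not Istio code; it is a minimal standalone illustration, with invented identifiers, of why the two forms behave the same. context.AfterFunc was added in Go 1.21 and runs its callback in a new goroutine once the context is done.

package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	// Old form from the hunk: a dedicated goroutine blocks until the context
	// is done and then runs the cleanup.
	viaGoroutine := make(chan struct{})
	go func() {
		<-ctx.Done()
		fmt.Println("cleanup via goroutine")
		close(viaGoroutine)
	}()

	// New form: context.AfterFunc (Go 1.21+) registers the same callback;
	// the runtime starts the goroutine for us once ctx is done.
	viaAfterFunc := make(chan struct{})
	stop := context.AfterFunc(ctx, func() {
		fmt.Println("cleanup via AfterFunc")
		close(viaAfterFunc)
	})
	_ = stop // calling stop() before cancellation would deregister the callback

	cancel() // cancel the context; both cleanups fire
	<-viaGoroutine
	<-viaAfterFunc
}

One practical difference is that AfterFunc returns a stop function, so the registration can be undone before the context is cancelled, which the anonymous-goroutine form cannot do without extra plumbing.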
{ "id": 7, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *AdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n", "\t\t\t\tlog.Warnf(\"ads received error: %v\", err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/adstest.go", "type": "replace", "edit_start_line_idx": 99 }
/* // Copyright Istio Authors */ /* // */ /* // Licensed under the Apache License, Version 2.0 (the "License"); */ /* // you may not use this file except in compliance with the License. */ /* // You may obtain a copy of the License at */ /* // */ /* // http://www.apache.org/licenses/LICENSE-2.0 */ /* // */ /* // Unless required by applicable law or agreed to in writing, software */ /* // distributed under the License is distributed on an "AS IS" BASIS, */ /* // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ /* // See the License for the specific language governing permissions and */ /* // limitations under the License. */ /* PrismJS 1.14.0 http://prismjs.com/download.html#themes=prism-tomorrow&languages=clike+javascript+bash+docker+go+java+protobuf+python+yaml */ /** * prism.js tomorrow night eighties for JavaScript, CoffeeScript, CSS and HTML * Based on https://github.com/chriskempson/tomorrow-theme * @author Rose Pritchard */ .token.comment, .token.block-comment, .token.prolog, .token.doctype, .token.cdata { color: #999; } .token.punctuation { color: #ccc; } .token.tag, .token.attr-name, .token.namespace, .token.deleted { color: #e2777a; } .token.function-name { color: #6196cc; } .token.boolean, .token.number, .token.function { color: #f08d49; } .token.property, .token.class-name, .token.constant, .token.symbol { color: #f8c555; } .token.selector, .token.important, .token.atrule, .token.keyword, .token.builtin { color: #cc99cd; } .token.string, .token.char, .token.attr-value, .token.regex, .token.variable { color: #7ec699; } .token.operator, .token.entity, .token.url { color: #67cdcc; } .token.important, .token.bold { font-weight: bold; } .token.italic { font-style: italic; } .token.entity { cursor: help; } .token.inserted { color: green; }
pkg/ctrlz/assets/static/css/dark_syntax-1.14.0.css
0
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.000177891124621965, 0.00017596245743334293, 0.0001728127826936543, 0.00017676690185908228, 0.0000016948423535723123 ]
{ "id": 7, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *AdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n", "\t\t\t\tlog.Warnf(\"ads received error: %v\", err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/adstest.go", "type": "replace", "edit_start_line_idx": 99 }
kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} subjects: - kind: ServiceAccount name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} namespace: {{.Release.Namespace}} roleRef: kind: ClusterRole name: istio-operator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }} apiGroup: rbac.authorization.k8s.io ---
manifests/charts/istio-operator/templates/clusterrole_binding.yaml
0
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.00017595282406546175, 0.00017573691729921848, 0.0001755210105329752, 0.00017573691729921848, 2.1590676624327898e-7 ]
{ "id": 7, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *AdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n", "\t\t\t\tlog.Warnf(\"ads received error: %v\", err)\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/adstest.go", "type": "replace", "edit_start_line_idx": 99 }
NAME     CLUSTER     CDS      LDS      EDS      RDS        ECDS       ISTIOD     VERSION
proxy1   cluster1    STALE    SYNCED   SYNCED   NOT SENT   NOT SENT   istiod1    1.1
proxy2   cluster2    STALE    SYNCED   STALE    SYNCED     NOT SENT   istiod1    1.1
istioctl/pkg/writer/pilot/testdata/multiStatusSinglePilot.txt
0
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.00017321316408924758, 0.00017321316408924758, 0.00017321316408924758, 0.00017321316408924758, 0 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *DeltaAdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/deltaadstest.go", "type": "replace", "edit_start_line_idx": 97 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package nodeagent import ( "context" "errors" "fmt" "io" "net" "os" "sync" "time" "golang.org/x/sys/unix" "google.golang.org/protobuf/proto" "istio.io/istio/pkg/monitoring" "istio.io/istio/pkg/zdsapi" ) var ( ztunnelKeepAliveCheckInterval = 5 * time.Second readWriteDeadline = 5 * time.Second ) var ztunnelConnected = monitoring.NewGauge("ztunnel_connected", "number of connections to ztunnel") type ZtunnelServer interface { Run(ctx context.Context) PodDeleted(ctx context.Context, uid string) error PodAdded(ctx context.Context, uid string, netns Netns) error Close() error } /* To clean up stale ztunnels we may need to ztunnel to send its (uid, bootid / boot time) to us so that we can remove stale entries when the ztunnel pod is deleted or when the ztunnel pod is restarted in the same pod (remove old entries when the same uid connects again, but with different boot id?) save a queue of what needs to be sent to the ztunnel pod and send it one by one when it connects. when a new ztunnel connects with different uid, only propagate deletes to older ztunnels. */ type connMgr struct { connectionSet map[*ZtunnelConnection]struct{} latestConn *ZtunnelConnection mu sync.Mutex } func (c *connMgr) addConn(conn *ZtunnelConnection) { log.Debug("ztunnel connected") c.mu.Lock() defer c.mu.Unlock() c.connectionSet[conn] = struct{}{} c.latestConn = conn ztunnelConnected.RecordInt(int64(len(c.connectionSet))) } func (c *connMgr) LatestConn() *ZtunnelConnection { c.mu.Lock() defer c.mu.Unlock() return c.latestConn } func (c *connMgr) deleteConn(conn *ZtunnelConnection) { log.Debug("ztunnel disconnected") c.mu.Lock() defer c.mu.Unlock() delete(c.connectionSet, conn) if c.latestConn == conn { c.latestConn = nil } ztunnelConnected.RecordInt(int64(len(c.connectionSet))) } // this is used in tests // nolint: unused func (c *connMgr) len() int { c.mu.Lock() defer c.mu.Unlock() return len(c.connectionSet) } type ztunnelServer struct { listener *net.UnixListener // connections to pod delivered map // add pod goes to newest connection // delete pod goes to all connections conns *connMgr pods PodNetnsCache } var _ ZtunnelServer = &ztunnelServer{} func newZtunnelServer(addr string, pods PodNetnsCache) (*ztunnelServer, error) { if addr == "" { return nil, fmt.Errorf("addr cannot be empty") } resolvedAddr, err := net.ResolveUnixAddr("unixpacket", addr) if err != nil { return nil, fmt.Errorf("failed to resolve unix addr: %w", err) } // remove potentially existing address // Remove unix socket before use, if one is leftover from previous CNI restart if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { // Anything other than "file not found" is an error. 
return nil, fmt.Errorf("failed to remove unix://%s: %w", addr, err) } l, err := net.ListenUnix("unixpacket", resolvedAddr) if err != nil { return nil, fmt.Errorf("failed to listen unix: %w", err) } return &ztunnelServer{ listener: l, conns: &connMgr{ connectionSet: map[*ZtunnelConnection]struct{}{}, }, pods: pods, }, nil } func (z *ztunnelServer) Close() error { return z.listener.Close() } func (z *ztunnelServer) Run(ctx context.Context) { go func() { <-ctx.Done() z.Close() }() for { log.Debug("accepting conn") conn, err := z.accept() if err != nil { if errors.Is(err, net.ErrClosed) { log.Debug("listener closed - returning") return } log.Errorf("failed to accept conn: %v", err) continue } log.Debug("connection accepted") go func() { log.Debug("handling conn") if err := z.handleConn(ctx, conn); err != nil { log.Errorf("failed to handle conn: %v", err) } }() } } // ZDS protocol is very simple, for every message sent, and ack is sent. // the ack only has temporal correlation (i.e. it is the first and only ack msg after the message was sent) // All this to say, that we want to make sure that message to ztunnel are sent from a single goroutine // so we don't mix messages and acks. // nolint: unparam func (z *ztunnelServer) handleConn(ctx context.Context, conn *ZtunnelConnection) error { defer conn.Close() go func() { <-ctx.Done() log.Debug("context cancelled - closing conn") conn.Close() }() // before doing anything, add the connection to the list of active connections z.conns.addConn(conn) defer z.conns.deleteConn(conn) // get hello message from ztunnel m, _, err := readProto[zdsapi.ZdsHello](conn.u, readWriteDeadline, nil) if err != nil { return err } log.Infof("received hello from ztunnel. %v", m.Version) log.Debug("sending snapshot to ztunnel") if err := z.sendSnapshot(ctx, conn); err != nil { return err } for { // listen for updates: select { case update, ok := <-conn.Updates: if !ok { log.Debug("update channel closed - returning") return nil } log.Debugf("got update to send to ztunnel") resp, err := conn.sendDataAndWaitForAck(update.Update, update.Fd) if err != nil { log.Errorf("ztunnel acked error: err %v ackErr %s", err, resp.GetAck().GetError()) } log.Debugf("ztunnel acked") // Safety: Resp is buffered, so this will not block update.Resp <- updateResponse{ err: err, resp: resp, } case <-time.After(ztunnelKeepAliveCheckInterval): // do a short read, just to see if the connection to ztunnel is // still alive. As ztunnel shouldn't send anything unless we send // something first, we expect to get an os.ErrDeadlineExceeded error // here if the connection is still alive. // note that unlike tcp connections, reading is a good enough test here. _, err := conn.readMessage(time.Second / 100) switch { case !errors.Is(err, os.ErrDeadlineExceeded): log.Debugf("ztunnel keepalive failed: %v", err) if errors.Is(err, io.EOF) { log.Debug("ztunnel EOF") return nil } return err case err == nil: log.Warn("ztunnel protocol error, unexpected message") return fmt.Errorf("ztunnel protocol error, unexpected message") default: // we get here if error is deadline exceeded, which means ztunnel is alive. 
} case <-ctx.Done(): return nil } } } func (z *ztunnelServer) PodDeleted(ctx context.Context, uid string) error { r := &zdsapi.WorkloadRequest{ Payload: &zdsapi.WorkloadRequest_Del{ Del: &zdsapi.DelWorkload{ Uid: uid, }, }, } data, err := proto.Marshal(r) if err != nil { return err } log.Debugf("sending delete pod to ztunnel: %s %v", uid, r) var delErr []error z.conns.mu.Lock() defer z.conns.mu.Unlock() for conn := range z.conns.connectionSet { _, err := conn.send(ctx, data, nil) if err != nil { delErr = append(delErr, err) } } return errors.Join(delErr...) } func (z *ztunnelServer) PodAdded(ctx context.Context, uid string, netns Netns) error { latestConn := z.conns.LatestConn() if latestConn == nil { return fmt.Errorf("no ztunnel connection") } r := &zdsapi.WorkloadRequest{ Payload: &zdsapi.WorkloadRequest_Add{ Add: &zdsapi.AddWorkload{ Uid: uid, }, }, } log.Debugf("About to send added pod: %s to ztunnel: %v", uid, r) data, err := proto.Marshal(r) if err != nil { return err } fd := int(netns.Fd()) resp, err := latestConn.send(ctx, data, &fd) if err != nil { return err } if resp.GetAck().GetError() != "" { log.Errorf("add-workload: got ack error: %s", resp.GetAck().GetError()) return fmt.Errorf("got ack error: %s", resp.GetAck().GetError()) } return nil } // TODO ctx is unused here // nolint: unparam func (z *ztunnelServer) sendSnapshot(ctx context.Context, conn *ZtunnelConnection) error { snap := z.pods.ReadCurrentPodSnapshot() for uid, netns := range snap { var resp *zdsapi.WorkloadResponse var err error if netns != nil { fd := int(netns.Fd()) log.Debugf("Sending local pod %s ztunnel", uid) resp, err = conn.sendMsgAndWaitForAck(&zdsapi.WorkloadRequest{ Payload: &zdsapi.WorkloadRequest_Add{ Add: &zdsapi.AddWorkload{ Uid: uid, }, }, }, &fd) } else { log.Infof("netns not available for local pod %s. 
sending keep to ztunnel", uid) resp, err = conn.sendMsgAndWaitForAck(&zdsapi.WorkloadRequest{ Payload: &zdsapi.WorkloadRequest_Keep{ Keep: &zdsapi.KeepWorkload{ Uid: uid, }, }, }, nil) } if err != nil { return err } if resp.GetAck().GetError() != "" { log.Errorf("add-workload: got ack error: %s", resp.GetAck().GetError()) } } resp, err := conn.sendMsgAndWaitForAck(&zdsapi.WorkloadRequest{ Payload: &zdsapi.WorkloadRequest_SnapshotSent{ SnapshotSent: &zdsapi.SnapshotSent{}, }, }, nil) if err != nil { return err } log.Debugf("snaptshot sent to ztunnel") if resp.GetAck().GetError() != "" { log.Errorf("snap-sent: got ack error: %s", resp.GetAck().GetError()) } return nil } func (z *ztunnelServer) accept() (*ZtunnelConnection, error) { log.Debug("accepting unix conn") conn, err := z.listener.AcceptUnix() if err != nil { return nil, fmt.Errorf("failed to accept unix: %w", err) } log.Debug("accepted conn") return newZtunnelConnection(conn), nil } type updateResponse struct { err error resp *zdsapi.WorkloadResponse } type updateRequest struct { Update []byte Fd *int Resp chan updateResponse } type ZtunnelConnection struct { u *net.UnixConn Updates chan updateRequest } func newZtunnelConnection(u *net.UnixConn) *ZtunnelConnection { return &ZtunnelConnection{u: u, Updates: make(chan updateRequest, 100)} } func (z *ZtunnelConnection) Close() { z.u.Close() } func (z *ZtunnelConnection) send(ctx context.Context, data []byte, fd *int) (*zdsapi.WorkloadResponse, error) { ret := make(chan updateResponse, 1) req := updateRequest{ Update: data, Fd: fd, Resp: ret, } select { case z.Updates <- req: case <-ctx.Done(): return nil, ctx.Err() } select { case r := <-ret: return r.resp, r.err case <-ctx.Done(): return nil, ctx.Err() } } func (z *ZtunnelConnection) sendMsgAndWaitForAck(msg *zdsapi.WorkloadRequest, fd *int) (*zdsapi.WorkloadResponse, error) { data, err := proto.Marshal(msg) if err != nil { return nil, err } return z.sendDataAndWaitForAck(data, fd) } func (z *ZtunnelConnection) sendDataAndWaitForAck(data []byte, fd *int) (*zdsapi.WorkloadResponse, error) { var rights []byte if fd != nil { rights = unix.UnixRights(*fd) } err := z.u.SetWriteDeadline(time.Now().Add(readWriteDeadline)) if err != nil { return nil, err } _, _, err = z.u.WriteMsgUnix(data, rights, nil) if err != nil { return nil, err } // wait for ack return z.readMessage(readWriteDeadline) } func (z *ZtunnelConnection) readMessage(timeout time.Duration) (*zdsapi.WorkloadResponse, error) { m, _, err := readProto[zdsapi.WorkloadResponse](z.u, timeout, nil) return m, err } func readProto[T any, PT interface { proto.Message *T }](c *net.UnixConn, timeout time.Duration, oob []byte) (PT, int, error) { var buf [1024]byte err := c.SetReadDeadline(time.Now().Add(timeout)) if err != nil { return nil, 0, err } n, oobn, flags, _, err := c.ReadMsgUnix(buf[:], oob) if err != nil { return nil, 0, err } if flags&unix.MSG_TRUNC != 0 { return nil, 0, fmt.Errorf("truncated message") } if flags&unix.MSG_CTRUNC != 0 { return nil, 0, fmt.Errorf("truncated control message") } var resp T var respPtr PT = &resp err = proto.Unmarshal(buf[:n], respPtr) if err != nil { return nil, 0, err } return respPtr, oobn, nil }
cni/pkg/nodeagent/ztunnelserver.go
1
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.004315515514463186, 0.0007253202493302524, 0.00016273997607640922, 0.0001730791263980791, 0.0011700893519446254 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *DeltaAdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/deltaadstest.go", "type": "replace", "edit_start_line_idx": 97 }
#!/bin/bash # WARNING: DO NOT EDIT, THIS FILE IS PROBABLY A COPY # # The original version of this file is located in the https://github.com/istio/common-files repo. # If you're looking at this file in a different repo and want to make a change, please go to the # common-files repo, make the change there and check it in. Then come back to this repo and run # "make update-common". # Copyright Istio Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e WD=$(dirname "$0") WD=$(cd "$WD"; pwd) export FOR_BUILD_CONTAINER=1 # shellcheck disable=SC1090,SC1091 source "${WD}/setup_env.sh" MOUNT_SOURCE="${MOUNT_SOURCE:-${PWD}}" MOUNT_DEST="${MOUNT_DEST:-/work}" read -ra DOCKER_RUN_OPTIONS <<< "${DOCKER_RUN_OPTIONS:-}" [[ -t 1 ]] && DOCKER_RUN_OPTIONS+=("-it") [[ ${UID} -ne 0 ]] && DOCKER_RUN_OPTIONS+=(-u "${UID}:${DOCKER_GID}") # $CONTAINER_OPTIONS becomes an empty arg when quoted, so SC2086 is disabled for the # following command only # shellcheck disable=SC2086 "${CONTAINER_CLI}" run \ --rm \ "${DOCKER_RUN_OPTIONS[@]}" \ --init \ --sig-proxy=true \ ${DOCKER_SOCKET_MOUNT:--v /var/run/docker.sock:/var/run/docker.sock} \ $CONTAINER_OPTIONS \ --env-file <(env | grep -v ${ENV_BLOCKLIST}) \ -e IN_BUILD_CONTAINER=1 \ -e TZ="${TIMEZONE:-$TZ}" \ --mount "type=bind,source=${MOUNT_SOURCE},destination=/work" \ --mount "type=volume,source=go,destination=/go" \ --mount "type=volume,source=gocache,destination=/gocache" \ --mount "type=volume,source=cache,destination=/home/.cache" \ --mount "type=volume,source=crates,destination=/home/.cargo/registry" \ --mount "type=volume,source=git-crates,destination=/home/.cargo/git" \ ${CONDITIONAL_HOST_MOUNTS} \ -w "${MOUNT_DEST}" "${IMG}" "$@"
common/scripts/run.sh
0
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.00017906194261740893, 0.00017313229909632355, 0.00016608422447461635, 0.00017393426969647408, 0.0000040105151128955185 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *DeltaAdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/deltaadstest.go", "type": "replace", "edit_start_line_idx": 97 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package structpath import ( "bytes" "encoding/json" "errors" "fmt" "reflect" "regexp" "strings" "github.com/google/go-cmp/cmp" "google.golang.org/protobuf/proto" "k8s.io/client-go/util/jsonpath" "istio.io/istio/pkg/test" "istio.io/istio/pkg/util/protomarshal" ) var ( fixupNumericJSONComparison = regexp.MustCompile(`([=<>]+)\s*([0-9]+)\s*\)`) fixupAttributeReference = regexp.MustCompile(`\[\s*'[^']+\s*'\s*]`) ) type Instance struct { structure any isJSON bool constraints []constraint creationError error } type constraint func() error // ForProto creates a structpath Instance by marshaling the proto to JSON and then evaluating over that // structure. This is the most generally useful form as serialization to JSON also automatically // converts proto.Any and proto.Struct to the serialized JSON forms which can then be evaluated // over. The downside is the loss of type fidelity for numeric types as JSON can only represent // floats. func ForProto(proto proto.Message) *Instance { if proto == nil { return newErrorInstance(errors.New("expected non-nil proto")) } parsed, err := protoToParsedJSON(proto) if err != nil { return newErrorInstance(err) } i := &Instance{ isJSON: true, structure: parsed, } i.structure = parsed return i } func newErrorInstance(err error) *Instance { return &Instance{ isJSON: true, creationError: err, } } func protoToParsedJSON(message proto.Message) (any, error) { // Convert proto to json and then parse into struct jsonText, err := protomarshal.MarshalIndent(message, " ") if err != nil { return nil, fmt.Errorf("failed to convert proto to JSON: %v", err) } var parsed any err = json.Unmarshal(jsonText, &parsed) if err != nil { return nil, fmt.Errorf("failed to parse into JSON struct: %v", err) } return parsed, nil } func (i *Instance) Select(path string, args ...any) *Instance { if i.creationError != nil { // There was an error during the creation of this Instance. Just return the // same instance since it will error on Check anyway. return i } path = fmt.Sprintf(path, args...) value, err := i.findValue(path) if err != nil { return newErrorInstance(err) } if value == nil { return newErrorInstance(fmt.Errorf("cannot select non-existent path: %v", path)) } // Success. return &Instance{ isJSON: i.isJSON, structure: value, } } func (i *Instance) appendConstraint(fn func() error) *Instance { i.constraints = append(i.constraints, fn) return i } func (i *Instance) Equals(expected any, path string, args ...any) *Instance { path = fmt.Sprintf(path, args...) 
return i.appendConstraint(func() error { typeOf := reflect.TypeOf(expected) protoMessageType := reflect.TypeOf((*proto.Message)(nil)).Elem() if typeOf.Implements(protoMessageType) { return i.equalsStruct(expected.(proto.Message), path) } switch kind := typeOf.Kind(); kind { case reflect.String: return i.equalsString(reflect.ValueOf(expected).String(), path) case reflect.Bool: return i.equalsBool(reflect.ValueOf(expected).Bool(), path) case reflect.Float32, reflect.Float64: return i.equalsNumber(reflect.ValueOf(expected).Float(), path) case reflect.Int, reflect.Int8, reflect.Int32, reflect.Int64: return i.equalsNumber(float64(reflect.ValueOf(expected).Int()), path) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: return i.equalsNumber(float64(reflect.ValueOf(expected).Uint()), path) case protoMessageType.Kind(): } // TODO: Add struct support return fmt.Errorf("attempt to call Equals for unsupported type: %v", expected) }) } func (i *Instance) ContainSubstring(substr, path string) *Instance { return i.appendConstraint(func() error { value, err := i.execute(path) if err != nil { return err } if found := strings.Contains(value, substr); !found { return fmt.Errorf("substring %v did not match: %v", substr, value) } return nil }) } func (i *Instance) equalsString(expected string, path string) error { value, err := i.execute(path) if err != nil { return err } if value != expected { return fmt.Errorf("expected %v but got %v for path %v", expected, value, path) } return nil } func (i *Instance) equalsNumber(expected float64, path string) error { v, err := i.findValue(path) if err != nil { return err } result := reflect.ValueOf(v).Float() if result != expected { return fmt.Errorf("expected %v but got %v for path %v", expected, result, path) } return nil } func (i *Instance) equalsBool(expected bool, path string) error { v, err := i.findValue(path) if err != nil { return err } result := reflect.ValueOf(v).Bool() if result != expected { return fmt.Errorf("expected %v but got %v for path %v", expected, result, path) } return nil } func (i *Instance) equalsStruct(proto proto.Message, path string) error { jsonStruct, err := protoToParsedJSON(proto) if err != nil { return err } v, err := i.findValue(path) if err != nil { return err } diff := cmp.Diff(reflect.ValueOf(v).Interface(), jsonStruct) if diff != "" { return fmt.Errorf("structs did not match: %v", diff) } return nil } func (i *Instance) Exists(path string, args ...any) *Instance { path = fmt.Sprintf(path, args...) return i.appendConstraint(func() error { v, err := i.findValue(path) if err != nil { return err } if v == nil { return fmt.Errorf("no entry exists at path: %v", path) } return nil }) } func (i *Instance) NotExists(path string, args ...any) *Instance { path = fmt.Sprintf(path, args...) return i.appendConstraint(func() error { parser := jsonpath.New("path") err := parser.Parse(i.fixPath(path)) if err != nil { return fmt.Errorf("invalid path: %v - %v", path, err) } values, err := parser.AllowMissingKeys(true).FindResults(i.structure) if err != nil { return fmt.Errorf("err finding results for path: %v - %v", path, err) } if len(values) == 0 { return nil } if len(values[0]) > 0 { return fmt.Errorf("expected no result but got: %v for path: %v", values[0], path) } return nil }) } // Check executes the set of constraints for this selection // and returns the first error encountered, or nil if all constraints // have been successfully met. All constraints are removed after them // check is performed. 
func (i *Instance) Check() error { // After the check completes, clear out the constraints. defer func() { i.constraints = i.constraints[:0] }() // If there was a creation error, just return that immediately. if i.creationError != nil { return i.creationError } for _, c := range i.constraints { if err := c(); err != nil { return err } } return nil } // CheckOrFail calls Check on this selection and fails the given test if an // error is encountered. func (i *Instance) CheckOrFail(t test.Failer) *Instance { t.Helper() if err := i.Check(); err != nil { t.Fatal(err) } return i } func (i *Instance) execute(path string) (string, error) { parser := jsonpath.New("path") err := parser.Parse(i.fixPath(path)) if err != nil { return "", fmt.Errorf("invalid path: %v - %v", path, err) } buf := new(bytes.Buffer) err = parser.Execute(buf, i.structure) if err != nil { return "", fmt.Errorf("err finding results for path: %v - %v", path, err) } return buf.String(), nil } func (i *Instance) findValue(path string) (any, error) { parser := jsonpath.New("path") err := parser.Parse(i.fixPath(path)) if err != nil { return nil, fmt.Errorf("invalid path: %v - %v", path, err) } values, err := parser.FindResults(i.structure) if err != nil { return nil, fmt.Errorf("err finding results for path: %v: %v. Structure: %v", path, err, i.structure) } if len(values) == 0 || len(values[0]) == 0 { return nil, fmt.Errorf("no value for path: %v", path) } return values[0][0].Interface(), nil } // Fixes up some quirks in jsonpath handling. // See https://github.com/kubernetes/client-go/issues/553 func (i *Instance) fixPath(path string) string { // jsonpath doesn't handle numeric comparisons in a tolerant way. All json numbers are floats // and filter expressions on the form {.x[?(@.some.value==123]} won't work but // {.x[?(@.some.value==123.0]} will. result := path if i.isJSON { template := "$1$2.0)" result = fixupNumericJSONComparison.ReplaceAllString(path, template) } // jsonpath doesn't like map literal references that contain periods. I.e // you can't do x['user.map'] but x.user\.map works so we just translate to that result = string(fixupAttributeReference.ReplaceAllFunc([]byte(result), func(i []byte) []byte { input := string(i) input = strings.Replace(input, "[", "", 1) input = strings.Replace(input, "]", "", 1) input = strings.Replace(input, "'", "", 2) parts := strings.Split(input, ".") output := "." for i := 0; i < len(parts)-1; i++ { output += parts[i] output += "\\." } output += parts[len(parts)-1] return []byte(output) })) return result }
pkg/test/util/structpath/instance.go
0
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.0011527016758918762, 0.00020344252698123455, 0.00015835162776056677, 0.00016945888637565076, 0.00016711898206267506 ]
{ "id": 8, "code_window": [ "\t\t}\n", "\t})\n", "}\n", "\n", "func (a *DeltaAdsTest) adsReceiveChannel() {\n", "\tgo func() {\n", "\t\t<-a.context.Done()\n", "\t\ta.Cleanup()\n", "\t}()\n", "\tfor {\n", "\t\tresp, err := a.client.Recv()\n", "\t\tif err != nil {\n", "\t\t\tif isUnexpectedError(err) {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tcontext.AfterFunc(a.context, a.Cleanup)\n" ], "file_path": "pilot/pkg/xds/deltaadstest.go", "type": "replace", "edit_start_line_idx": 97 }
// Copyright Istio Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package configdump import ( "sort" admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3" cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3" v3 "istio.io/istio/pilot/pkg/xds/v3" ) // GetDynamicClusterDump retrieves a cluster dump with just dynamic active clusters in it func (w *Wrapper) GetDynamicClusterDump(stripVersions bool) (*admin.ClustersConfigDump, error) { clusterDump, err := w.GetClusterConfigDump() if err != nil { return nil, err } dac := clusterDump.GetDynamicActiveClusters() // Allow sorting to work even if we don't have the exact same type for i := range dac { dac[i].Cluster.TypeUrl = v3.ClusterType } sort.Slice(dac, func(i, j int) bool { cluster := &cluster.Cluster{} err = dac[i].Cluster.UnmarshalTo(cluster) if err != nil { return false } name := cluster.Name err = dac[j].Cluster.UnmarshalTo(cluster) if err != nil { return false } return name < cluster.Name }) if stripVersions { for i := range dac { dac[i].VersionInfo = "" dac[i].LastUpdated = nil } } return &admin.ClustersConfigDump{DynamicActiveClusters: dac}, nil } // GetClusterConfigDump retrieves the cluster config dump from the ConfigDump func (w *Wrapper) GetClusterConfigDump() (*admin.ClustersConfigDump, error) { clusterDumpAny, err := w.getSection(clusters) if err != nil { return nil, err } clusterDump := &admin.ClustersConfigDump{} err = clusterDumpAny.UnmarshalTo(clusterDump) if err != nil { return nil, err } return clusterDump, nil }
istioctl/pkg/util/configdump/cluster.go
0
https://github.com/istio/istio/commit/7fc69708a1ff4d4cfee27b1e4b1105f223f9903d
[ 0.00017945471336133778, 0.000171337160281837, 0.00016502547077834606, 0.0001701707806205377, 0.000004593018729792675 ]
{ "id": 0, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tsort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))\n", "\n", "\tvar errList []error\n", "\t// TODO: This should be parallelized.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tglog.V(2).Infof(\"Looking to cleanup old replica sets for deployment %q\", deployment.Name)\n" ], "file_path": "pkg/controller/deployment/sync.go", "type": "add", "edit_start_line_idx": 549 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployment import ( "fmt" "reflect" "sort" "strconv" "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" labelsutil "k8s.io/kubernetes/pkg/util/labels" ) // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions. func (dc *DeploymentController) syncStatusOnly(deployment *extensions.Deployment) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false) if err != nil { return err } allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, deployment) } // sync is responsible for reconciling deployments on scaling events or when they // are paused. func (dc *DeploymentController) sync(deployment *extensions.Deployment) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false) if err != nil { return err } if err := dc.scale(deployment, newRS, oldRSs); err != nil { // If we get an error while trying to scale, the deployment will be requeued // so we can abort this resync return err } allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, deployment) } // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition. // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments // that were paused for longer than progressDeadlineSeconds. func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error { if d.Spec.ProgressDeadlineSeconds == nil { return nil } cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) if cond != nil && cond.Reason == deploymentutil.TimedOutReason { // If we have reported lack of progress, do not overwrite it with a paused condition. 
return nil } pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason needsUpdate := false if d.Spec.Paused && !pausedCondExists { condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } else if !d.Spec.Paused && pausedCondExists { condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if !needsUpdate { return nil } var err error d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) return err } // getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated. // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV). // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1), // only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. // 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of replica sets, thus incorrect deployment status. func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *extensions.Deployment, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { // List the deployment's RSes & Pods and apply pod-template-hash info to deployment's adopted RSes/Pods rsList, podList, err := dc.rsAndPodsWithHashKeySynced(deployment) if err != nil { return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err) } _, allOldRSs, err := deploymentutil.FindOldReplicaSets(deployment, rsList, podList) if err != nil { return nil, nil, err } // Get new replica set with the updated revision number newRS, err := dc.getNewReplicaSet(deployment, rsList, allOldRSs, createIfNotExisted) if err != nil { return nil, nil, err } return newRS, allOldRSs, nil } // rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced. func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) { rsList, err := deploymentutil.ListReplicaSets(deployment, func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } return dc.rsLister.ReplicaSets(namespace).List(parsed) }) if err != nil { return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err) } syncedRSList := []*extensions.ReplicaSet{} for _, rs := range rsList { // Add pod-template-hash information if it's not in the RS. // Otherwise, new RS produced by Deployment will overlap with pre-existing ones // that aren't constrained by the pod-template-hash. 
syncedRS, err := dc.addHashKeyToRSAndPods(rs) if err != nil { return nil, nil, err } syncedRSList = append(syncedRSList, syncedRS) } syncedPodList, err := dc.listPods(deployment) if err != nil { return nil, nil, err } return syncedRSList, syncedPodList, nil } // addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps: // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created // 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas // 3. Add hash label to the rs's label and selector func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { // If the rs already has the new hash label in its selector, it's done syncing if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return rs, nil } hash := deploymentutil.GetReplicaSetHash(rs) // 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label. updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its pod template label. if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated } updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) return nil }) if err != nil { return nil, fmt.Errorf("error updating replica set %s/%s pod template label with template hash: %v", rs.Namespace, rs.Name, err) } // Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods). if updatedRS.Generation > updatedRS.Status.ObservedGeneration { if err = deploymentutil.WaitForReplicaSetUpdated(dc.client, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("error waiting for replica set %s/%s to be observed by controller: %v", updatedRS.Namespace, updatedRS.Name, err) } glog.V(4).Infof("Observed the update of replica set %s/%s's pod template with hash %s.", rs.Namespace, rs.Name, hash) } // 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted. 
selector, err := metav1.LabelSelectorAsSelector(updatedRS.Spec.Selector) if err != nil { return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } pods, err := dc.podLister.Pods(updatedRS.Namespace).List(parsed) if err != nil { return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", rs.Namespace, options, err) } podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))} for i := range pods { podList.Items = append(podList.Items, *pods[i]) } if err := deploymentutil.LabelPodsWithHash(&podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil { return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err) } // We need to wait for the replicaset controller to observe the pods being // labeled with pod template hash. Because previously we've called // WaitForReplicaSetUpdated, the replicaset controller should have dropped // FullyLabeledReplicas to 0 already, we only need to wait it to increase // back to the number of replicas in the spec. if err := deploymentutil.WaitForPodsHashPopulated(dc.client, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("Replica set %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Namespace, updatedRS.Name, err) } // 3. Update rs label and selector to include the new hash label // Copy the old selector, so that we can scrub out any orphaned pods updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its label and selector. if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated } updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash) return nil }) // If the RS isn't actually updated, that's okay, we'll retry in the // next sync loop since its selector isn't updated yet. if err != nil { return nil, fmt.Errorf("error updating ReplicaSet %s/%s label and selector with template hash: %v", updatedRS.Namespace, updatedRS.Name, err) } // TODO: look for orphaned pods and label them in the background somewhere else periodically return updatedRS, nil } func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) { return deploymentutil.ListPods(deployment, func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } pods, err := dc.podLister.Pods(namespace).List(parsed) result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))} for i := range pods { result.Items = append(result.Items, *pods[i]) } return &result, err }) } // Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet. // 1. 
Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's). // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes. // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the pod-template-hash will be added to adopted RSes and pods. func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) { existingNewRS, err := deploymentutil.FindNewReplicaSet(deployment, rsList) if err != nil { return nil, err } // Calculate the max revision number among all old RSes maxOldRevision := deploymentutil.MaxRevision(oldRSs) // Calculate revision number for this new replica set newRevision := strconv.FormatInt(maxOldRevision+1, 10) // Latest replica set exists. We need to sync its annotations (includes copying all but // annotationsToSkip from the parent deployment, and update revision, desiredReplicas, // and maxReplicas) and also update the revision annotation in the deployment with the // latest revision. if existingNewRS != nil { objCopy, err := api.Scheme.Copy(existingNewRS) if err != nil { return nil, err } rsCopy := objCopy.(*extensions.ReplicaSet) // Set existing new replica set's annotation annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(deployment, rsCopy, newRevision, true) minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != deployment.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = deployment.Spec.MinReadySeconds return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) } updateConditions := deploymentutil.SetDeploymentRevision(deployment, newRevision) // If no other Progressing condition has been recorded and we need to estimate the progress // of this deployment then it is likely that old users started caring about progress. In that // case we need to take into account the first time we noticed their new replica set. cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing) if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil { msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) updateConditions = true } if updateConditions { if deployment, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment); err != nil { return nil, err } } return rsCopy, nil } if !createIfNotExisted { return nil, nil } // new ReplicaSet does not exist, create one. namespace := deployment.Namespace podTemplateSpecHash := fmt.Sprintf("%d", deploymentutil.GetPodTemplateSpecHash(deployment.Spec.Template)) newRSTemplate := deploymentutil.GetNewReplicaSetTemplate(deployment) newRSTemplate.Labels = labelsutil.CloneAndAddLabel(deployment.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Add podTemplateHash label to selector. 
newRSSelector := labelsutil.CloneSelectorAndAddLabel(deployment.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Create new ReplicaSet newRS := extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ // Make the name deterministic, to ensure idempotence Name: deployment.Name + "-" + podTemplateSpecHash, Namespace: namespace, }, Spec: extensions.ReplicaSetSpec{ Replicas: func(i int32) *int32 { return &i }(0), MinReadySeconds: deployment.Spec.MinReadySeconds, Selector: newRSSelector, Template: newRSTemplate, }, } var trueVar = true controllerRef := &metav1.OwnerReference{ APIVersion: getDeploymentKind().GroupVersion().String(), Kind: getDeploymentKind().Kind, Name: deployment.Name, UID: deployment.UID, Controller: &trueVar, } newRS.OwnerReferences = append(newRS.OwnerReferences, *controllerRef) allRSs := append(oldRSs, &newRS) newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, &newRS) if err != nil { return nil, err } *(newRS.Spec.Replicas) = newReplicasCount // Set new replica set's annotation deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false) createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the deployment. // TODO: Restore once https://github.com/kubernetes/kubernetes/issues/29735 is fixed // ie. we start using a new hashing algorithm. case errors.IsAlreadyExists(err): return nil, err // return dc.rsLister.ReplicaSets(namespace).Get(newRS.Name) case err != nil: msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err) if deployment.Spec.ProgressDeadlineSeconds != nil { cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *cond) // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Update the rest of the Deployment status, too. We may need to do this every time we // error out in all other places in the controller so that we let users know that their deployments // have been noticed by the controller, albeit with errors. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 _, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment) } dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err } if newReplicasCount > 0 { dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount) } deploymentutil.SetDeploymentRevision(deployment, newRevision) if deployment.Spec.ProgressDeadlineSeconds != nil { msg := fmt.Sprintf("Created new replica set %q", createdRS.Name) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) } _, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment) return createdRS, err } // scale scales proportionally in order to mitigate risk. 
Otherwise, scaling up can increase the size // of the new replica set and scaling down can decrease the sizes of the old ones, both of which would // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error { // If there is only one active replica set then we should scale that up to the full count of the // deployment. If there is no active replica set, then we should scale up the newest replica set. if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil { if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) { return nil } _, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment) return err } // If the new replica set is saturated, old replica sets should be fully scaled down. // This case handles replica set adoption during a saturated new replica set. if deploymentutil.IsSaturated(deployment, newRS) { for _, old := range controller.FilterActiveReplicaSets(oldRSs) { if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil { return err } } return nil } // There are old replica sets with pods and the new replica set is not saturated. // We need to proportionally scale all replica sets (new and old) in case of a // rolling deployment. if deploymentutil.IsRollingUpdate(deployment) { allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS)) allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) allowedSize := int32(0) if *(deployment.Spec.Replicas) > 0 { allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) } // Number of additional replicas that can be either added or removed from the total // replicas count. These replicas should be distributed proportionally to the active // replica sets. deploymentReplicasToAdd := allowedSize - allRSsReplicas // The additional replicas should be distributed proportionally amongst the active // replica sets from the larger to the smaller in size replica set. Scaling direction // drives what happens in case we are trying to scale replica sets of the same size. // In such a case when scaling up, we should scale up newer replica sets first, and // when scaling down, we should scale down older replica sets first. var scalingOperation string switch { case deploymentReplicasToAdd > 0: sort.Sort(controller.ReplicaSetsBySizeNewer(allRSs)) scalingOperation = "up" case deploymentReplicasToAdd < 0: sort.Sort(controller.ReplicaSetsBySizeOlder(allRSs)) scalingOperation = "down" } // Iterate over all active replica sets and estimate proportions for each of them. // The absolute value of deploymentReplicasAdded should never exceed the absolute // value of deploymentReplicasToAdd. deploymentReplicasAdded := int32(0) nameToSize := make(map[string]int32) for i := range allRSs { rs := allRSs[i] // Estimate proportions if we have replicas to add, otherwise simply populate // nameToSize with the current sizes for each replica set. 
if deploymentReplicasToAdd != 0 { proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion deploymentReplicasAdded += proportion } else { nameToSize[rs.Name] = *(rs.Spec.Replicas) } } // Update all replica sets for i := range allRSs { rs := allRSs[i] // Add/remove any leftovers to the largest replica set. if i == 0 && deploymentReplicasToAdd != 0 { leftover := deploymentReplicasToAdd - deploymentReplicasAdded nameToSize[rs.Name] = nameToSize[rs.Name] + leftover if nameToSize[rs.Name] < 0 { nameToSize[rs.Name] = 0 } } // TODO: Use transactions when we have them. if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil { // Return as soon as we fail, the deployment is requeued return err } } } return nil } func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { // No need to scale if *(rs.Spec.Replicas) == newScale { return false, rs, nil } var scalingOperation string if *(rs.Spec.Replicas) < newScale { scalingOperation = "up" } else { scalingOperation = "down" } scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation) return scaled, newRS, err } func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) { objCopy, err := api.Scheme.Copy(rs) if err != nil { return false, nil, err } rsCopy := objCopy.(*extensions.ReplicaSet) sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale // TODO: Do not mutate the replica set here, instead simply compare the annotation and if they mismatch // call SetReplicasAnnotations inside the following if clause. Then we can also move the deep-copy from // above inside the if too. annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) scaled := false if sizeNeedsUpdate || annotationsNeedUpdate { *(rsCopy.Spec.Replicas) = newScale rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) } } return scaled, rs, err } // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old replica sets // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit if diff <= 0 { return nil } sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) var errList []error // TODO: This should be parallelized. 
for i := int32(0); i < diff; i++ { rs := oldRSs[i] // Avoid delete replica set with non-zero replica counts if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration { continue } if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err) errList = append(errList, err) } } return utilerrors.NewAggregate(errList) } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) if reflect.DeepEqual(d.Status, newStatus) { return nil } newDeployment := d newDeployment.Status = newStatus _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets. func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus { availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) unavailableReplicas := totalReplicas - availableReplicas // If unavailableReplicas is negative, then that means the Deployment has more available replicas running than // desired, eg. whenever it scales down. In such a case we should simply default unavailableReplicas to zero. if unavailableReplicas < 0 { unavailableReplicas = 0 } status := extensions.DeploymentStatus{ // TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value. ObservedGeneration: deployment.Generation, Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs), UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}), ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, } // Copy conditions one by one so we won't mutate the original object. conditions := deployment.Status.Conditions for i := range conditions { status.Conditions = append(status.Conditions, conditions[i]) } if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) { minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") deploymentutil.SetDeploymentCondition(&status, *minAvailability) } else { noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") deploymentutil.SetDeploymentCondition(&status, *noMinAvailability) } return status } // isScalingEvent checks whether the provided deployment has been updated with a scaling event // by looking at the desired-replicas annotation in the active replica sets of the deployment. 
func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool, error) { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false) if err != nil { return false, err } allRSs := append(oldRSs, newRS) for _, rs := range controller.FilterActiveReplicaSets(allRSs) { desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) if !ok { continue } if desired != *(d.Spec.Replicas) { return true, nil } } return false, nil }
pkg/controller/deployment/sync.go
1
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.9983001351356506, 0.1897345930337906, 0.00016543178935535252, 0.0009371625492349267, 0.37519994378089905 ]
{ "id": 0, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tsort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))\n", "\n", "\tvar errList []error\n", "\t// TODO: This should be parallelized.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tglog.V(2).Infof(\"Looking to cleanup old replica sets for deployment %q\", deployment.Name)\n" ], "file_path": "pkg/controller/deployment/sync.go", "type": "add", "edit_start_line_idx": 549 }
package waiter import ( "fmt" "reflect" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) // A Config provides a collection of configuration values to setup a generated // waiter code with. type Config struct { Name string Delay int MaxAttempts int Operation string Acceptors []WaitAcceptor } // A WaitAcceptor provides the information needed to wait for an API operation // to complete. type WaitAcceptor struct { Expected interface{} Matcher string State string Argument string } // A Waiter provides waiting for an operation to complete. type Waiter struct { Config Client interface{} Input interface{} } // Wait waits for an operation to complete, expire max attempts, or fail. Error // is returned if the operation fails. func (w *Waiter) Wait() error { client := reflect.ValueOf(w.Client) in := reflect.ValueOf(w.Input) method := client.MethodByName(w.Config.Operation + "Request") for i := 0; i < w.MaxAttempts; i++ { res := method.Call([]reflect.Value{in}) req := res[0].Interface().(*request.Request) req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter")) err := req.Send() for _, a := range w.Acceptors { result := false var vals []interface{} switch a.Matcher { case "pathAll", "path": // Require all matches to be equal for result to match vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) if len(vals) == 0 { break } result = true for _, val := range vals { if !awsutil.DeepEqual(val, a.Expected) { result = false break } } case "pathAny": // Only a single match needs to equal for the result to match vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) for _, val := range vals { if awsutil.DeepEqual(val, a.Expected) { result = true break } } case "status": s := a.Expected.(int) result = s == req.HTTPResponse.StatusCode case "error": if aerr, ok := err.(awserr.Error); ok { result = aerr.Code() == a.Expected.(string) } case "pathList": // ignored matcher default: logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s", w.Config.Operation, a.Matcher) } if !result { // If there was no matching result found there is nothing more to do // for this response, retry the request. continue } switch a.State { case "success": // waiter completed return nil case "failure": // Waiter failure state triggered return awserr.New("ResourceNotReady", fmt.Sprintf("failed waiting for successful resource state"), err) case "retry": // clear the error and retry the operation err = nil default: logf(client, "WARNING: Waiter for %s encountered unexpected state: %s", w.Config.Operation, a.State) } } if err != nil { return err } time.Sleep(time.Second * time.Duration(w.Delay)) } return awserr.New("ResourceNotReady", fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil) } func logf(client reflect.Value, msg string, args ...interface{}) { cfgVal := client.FieldByName("Config") if !cfgVal.IsValid() { return } if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil { cfg.Logger.Log(fmt.Sprintf(msg, args...)) } }
vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.001459492021240294, 0.0002846936695277691, 0.00016716444224584848, 0.00017059038509614766, 0.0003354553773533553 ]
{ "id": 0, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tsort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))\n", "\n", "\tvar errList []error\n", "\t// TODO: This should be parallelized.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tglog.V(2).Infof(\"Looking to cleanup old replica sets for deployment %q\", deployment.Name)\n" ], "file_path": "pkg/controller/deployment/sync.go", "type": "add", "edit_start_line_idx": 549 }
#!/bin/bash # Copyright 2014 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## Contains configuration values for interacting with the Vagrant cluster in test mode KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. source "${KUBE_ROOT}/cluster/libvirt-coreos/config-default.sh"
cluster/libvirt-coreos/config-test.sh
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.00017567838949616998, 0.00017462889081798494, 0.0001735793921397999, 0.00017462889081798494, 0.0000010494986781850457 ]
{ "id": 0, "code_window": [ "\t\treturn nil\n", "\t}\n", "\n", "\tsort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))\n", "\n", "\tvar errList []error\n", "\t// TODO: This should be parallelized.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tglog.V(2).Infof(\"Looking to cleanup old replica sets for deployment %q\", deployment.Name)\n" ], "file_path": "pkg/controller/deployment/sync.go", "type": "add", "edit_start_line_idx": 549 }
// Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package precis import ( "golang.org/x/text/cases" "golang.org/x/text/runes" "golang.org/x/text/transform" "golang.org/x/text/unicode/norm" "golang.org/x/text/width" ) // An Option is used to define the behavior and rules of a Profile. type Option func(*options) type options struct { // Preparation options foldWidth bool // Enforcement options cases transform.Transformer disallow runes.Set norm norm.Form additional []func() transform.Transformer width *width.Transformer disallowEmpty bool bidiRule bool // Comparison options ignorecase bool } func getOpts(o ...Option) (res options) { for _, f := range o { f(&res) } return } var ( // The IgnoreCase option causes the profile to perform a case insensitive // comparison during the PRECIS comparison step. IgnoreCase Option = ignoreCase // The FoldWidth option causes the profile to map non-canonical wide and // narrow variants to their decomposition mapping. This is useful for // profiles that are based on the identifier class which would otherwise // disallow such characters. FoldWidth Option = foldWidth // The DisallowEmpty option causes the enforcement step to return an error if // the resulting string would be empty. DisallowEmpty Option = disallowEmpty // The BidiRule option causes the Bidi Rule defined in RFC 5893 to be // applied. BidiRule Option = bidiRule ) var ( ignoreCase = func(o *options) { o.ignorecase = true } foldWidth = func(o *options) { o.foldWidth = true } disallowEmpty = func(o *options) { o.disallowEmpty = true } bidiRule = func(o *options) { o.bidiRule = true } ) // The AdditionalMapping option defines the additional mapping rule for the // Profile by applying Transformer's in sequence. func AdditionalMapping(t ...func() transform.Transformer) Option { return func(o *options) { o.additional = t } } // The Norm option defines a Profile's normalization rule. Defaults to NFC. func Norm(f norm.Form) Option { return func(o *options) { o.norm = f } } // The FoldCase option defines a Profile's case mapping rule. Options can be // provided to determine the type of case folding used. func FoldCase(opts ...cases.Option) Option { return func(o *options) { o.cases = cases.Fold(opts...) } } // The Disallow option further restricts a Profile's allowed characters beyond // what is disallowed by the underlying string class. func Disallow(set runes.Set) Option { return func(o *options) { o.disallow = set } }
vendor/golang.org/x/text/secure/precis/options.go
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.00019822655303869396, 0.00017223837494384497, 0.0001654819498071447, 0.00017009314615279436, 0.00000880192601471208 ]
{ "id": 2, "code_window": [ "\n", "// Waits for the deployment to clean up old rcs.\n", "func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n", "\treturn wait.Poll(Poll, 5*time.Minute, func() (bool, error) {\n", "\t\tdeployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})\n", "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar oldRSs []*extensions.ReplicaSet\n", "\tvar d *extensions.Deployment\n", "\n", "\tpollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {\n" ], "file_path": "test/e2e/framework/util.go", "type": "replace", "edit_start_line_idx": 1000 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployment import ( "fmt" "reflect" "sort" "strconv" "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" labelsutil "k8s.io/kubernetes/pkg/util/labels" ) // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions. func (dc *DeploymentController) syncStatusOnly(deployment *extensions.Deployment) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false) if err != nil { return err } allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, deployment) } // sync is responsible for reconciling deployments on scaling events or when they // are paused. func (dc *DeploymentController) sync(deployment *extensions.Deployment) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false) if err != nil { return err } if err := dc.scale(deployment, newRS, oldRSs); err != nil { // If we get an error while trying to scale, the deployment will be requeued // so we can abort this resync return err } allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, deployment) } // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition. // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments // that were paused for longer than progressDeadlineSeconds. func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error { if d.Spec.ProgressDeadlineSeconds == nil { return nil } cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) if cond != nil && cond.Reason == deploymentutil.TimedOutReason { // If we have reported lack of progress, do not overwrite it with a paused condition. 
return nil } pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason needsUpdate := false if d.Spec.Paused && !pausedCondExists { condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } else if !d.Spec.Paused && pausedCondExists { condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if !needsUpdate { return nil } var err error d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) return err } // getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated. // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV). // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1), // only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. // 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of replica sets, thus incorrect deployment status. func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *extensions.Deployment, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { // List the deployment's RSes & Pods and apply pod-template-hash info to deployment's adopted RSes/Pods rsList, podList, err := dc.rsAndPodsWithHashKeySynced(deployment) if err != nil { return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err) } _, allOldRSs, err := deploymentutil.FindOldReplicaSets(deployment, rsList, podList) if err != nil { return nil, nil, err } // Get new replica set with the updated revision number newRS, err := dc.getNewReplicaSet(deployment, rsList, allOldRSs, createIfNotExisted) if err != nil { return nil, nil, err } return newRS, allOldRSs, nil } // rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced. func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) { rsList, err := deploymentutil.ListReplicaSets(deployment, func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } return dc.rsLister.ReplicaSets(namespace).List(parsed) }) if err != nil { return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err) } syncedRSList := []*extensions.ReplicaSet{} for _, rs := range rsList { // Add pod-template-hash information if it's not in the RS. // Otherwise, new RS produced by Deployment will overlap with pre-existing ones // that aren't constrained by the pod-template-hash. 
syncedRS, err := dc.addHashKeyToRSAndPods(rs) if err != nil { return nil, nil, err } syncedRSList = append(syncedRSList, syncedRS) } syncedPodList, err := dc.listPods(deployment) if err != nil { return nil, nil, err } return syncedRSList, syncedPodList, nil } // addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps: // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created // 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas // 3. Add hash label to the rs's label and selector func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { // If the rs already has the new hash label in its selector, it's done syncing if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return rs, nil } hash := deploymentutil.GetReplicaSetHash(rs) // 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label. updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its pod template label. if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated } updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) return nil }) if err != nil { return nil, fmt.Errorf("error updating replica set %s/%s pod template label with template hash: %v", rs.Namespace, rs.Name, err) } // Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods). if updatedRS.Generation > updatedRS.Status.ObservedGeneration { if err = deploymentutil.WaitForReplicaSetUpdated(dc.client, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("error waiting for replica set %s/%s to be observed by controller: %v", updatedRS.Namespace, updatedRS.Name, err) } glog.V(4).Infof("Observed the update of replica set %s/%s's pod template with hash %s.", rs.Namespace, rs.Name, hash) } // 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted. 
selector, err := metav1.LabelSelectorAsSelector(updatedRS.Spec.Selector) if err != nil { return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } pods, err := dc.podLister.Pods(updatedRS.Namespace).List(parsed) if err != nil { return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", rs.Namespace, options, err) } podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))} for i := range pods { podList.Items = append(podList.Items, *pods[i]) } if err := deploymentutil.LabelPodsWithHash(&podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil { return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err) } // We need to wait for the replicaset controller to observe the pods being // labeled with pod template hash. Because previously we've called // WaitForReplicaSetUpdated, the replicaset controller should have dropped // FullyLabeledReplicas to 0 already, we only need to wait it to increase // back to the number of replicas in the spec. if err := deploymentutil.WaitForPodsHashPopulated(dc.client, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("Replica set %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Namespace, updatedRS.Name, err) } // 3. Update rs label and selector to include the new hash label // Copy the old selector, so that we can scrub out any orphaned pods updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its label and selector. if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated } updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash) return nil }) // If the RS isn't actually updated, that's okay, we'll retry in the // next sync loop since its selector isn't updated yet. if err != nil { return nil, fmt.Errorf("error updating ReplicaSet %s/%s label and selector with template hash: %v", updatedRS.Namespace, updatedRS.Name, err) } // TODO: look for orphaned pods and label them in the background somewhere else periodically return updatedRS, nil } func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) { return deploymentutil.ListPods(deployment, func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } pods, err := dc.podLister.Pods(namespace).List(parsed) result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))} for i := range pods { result.Items = append(result.Items, *pods[i]) } return &result, err }) } // Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet. // 1. 
Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's). // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes. // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the pod-template-hash will be added to adopted RSes and pods. func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) { existingNewRS, err := deploymentutil.FindNewReplicaSet(deployment, rsList) if err != nil { return nil, err } // Calculate the max revision number among all old RSes maxOldRevision := deploymentutil.MaxRevision(oldRSs) // Calculate revision number for this new replica set newRevision := strconv.FormatInt(maxOldRevision+1, 10) // Latest replica set exists. We need to sync its annotations (includes copying all but // annotationsToSkip from the parent deployment, and update revision, desiredReplicas, // and maxReplicas) and also update the revision annotation in the deployment with the // latest revision. if existingNewRS != nil { objCopy, err := api.Scheme.Copy(existingNewRS) if err != nil { return nil, err } rsCopy := objCopy.(*extensions.ReplicaSet) // Set existing new replica set's annotation annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(deployment, rsCopy, newRevision, true) minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != deployment.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = deployment.Spec.MinReadySeconds return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) } updateConditions := deploymentutil.SetDeploymentRevision(deployment, newRevision) // If no other Progressing condition has been recorded and we need to estimate the progress // of this deployment then it is likely that old users started caring about progress. In that // case we need to take into account the first time we noticed their new replica set. cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing) if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil { msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) updateConditions = true } if updateConditions { if deployment, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment); err != nil { return nil, err } } return rsCopy, nil } if !createIfNotExisted { return nil, nil } // new ReplicaSet does not exist, create one. namespace := deployment.Namespace podTemplateSpecHash := fmt.Sprintf("%d", deploymentutil.GetPodTemplateSpecHash(deployment.Spec.Template)) newRSTemplate := deploymentutil.GetNewReplicaSetTemplate(deployment) newRSTemplate.Labels = labelsutil.CloneAndAddLabel(deployment.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Add podTemplateHash label to selector. 
newRSSelector := labelsutil.CloneSelectorAndAddLabel(deployment.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Create new ReplicaSet newRS := extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ // Make the name deterministic, to ensure idempotence Name: deployment.Name + "-" + podTemplateSpecHash, Namespace: namespace, }, Spec: extensions.ReplicaSetSpec{ Replicas: func(i int32) *int32 { return &i }(0), MinReadySeconds: deployment.Spec.MinReadySeconds, Selector: newRSSelector, Template: newRSTemplate, }, } var trueVar = true controllerRef := &metav1.OwnerReference{ APIVersion: getDeploymentKind().GroupVersion().String(), Kind: getDeploymentKind().Kind, Name: deployment.Name, UID: deployment.UID, Controller: &trueVar, } newRS.OwnerReferences = append(newRS.OwnerReferences, *controllerRef) allRSs := append(oldRSs, &newRS) newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, &newRS) if err != nil { return nil, err } *(newRS.Spec.Replicas) = newReplicasCount // Set new replica set's annotation deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false) createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the deployment. // TODO: Restore once https://github.com/kubernetes/kubernetes/issues/29735 is fixed // ie. we start using a new hashing algorithm. case errors.IsAlreadyExists(err): return nil, err // return dc.rsLister.ReplicaSets(namespace).Get(newRS.Name) case err != nil: msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err) if deployment.Spec.ProgressDeadlineSeconds != nil { cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *cond) // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Update the rest of the Deployment status, too. We may need to do this every time we // error out in all other places in the controller so that we let users know that their deployments // have been noticed by the controller, albeit with errors. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 _, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment) } dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err } if newReplicasCount > 0 { dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount) } deploymentutil.SetDeploymentRevision(deployment, newRevision) if deployment.Spec.ProgressDeadlineSeconds != nil { msg := fmt.Sprintf("Created new replica set %q", createdRS.Name) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) } _, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment) return createdRS, err } // scale scales proportionally in order to mitigate risk. 
Otherwise, scaling up can increase the size // of the new replica set and scaling down can decrease the sizes of the old ones, both of which would // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error { // If there is only one active replica set then we should scale that up to the full count of the // deployment. If there is no active replica set, then we should scale up the newest replica set. if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil { if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) { return nil } _, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment) return err } // If the new replica set is saturated, old replica sets should be fully scaled down. // This case handles replica set adoption during a saturated new replica set. if deploymentutil.IsSaturated(deployment, newRS) { for _, old := range controller.FilterActiveReplicaSets(oldRSs) { if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil { return err } } return nil } // There are old replica sets with pods and the new replica set is not saturated. // We need to proportionally scale all replica sets (new and old) in case of a // rolling deployment. if deploymentutil.IsRollingUpdate(deployment) { allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS)) allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) allowedSize := int32(0) if *(deployment.Spec.Replicas) > 0 { allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) } // Number of additional replicas that can be either added or removed from the total // replicas count. These replicas should be distributed proportionally to the active // replica sets. deploymentReplicasToAdd := allowedSize - allRSsReplicas // The additional replicas should be distributed proportionally amongst the active // replica sets from the larger to the smaller in size replica set. Scaling direction // drives what happens in case we are trying to scale replica sets of the same size. // In such a case when scaling up, we should scale up newer replica sets first, and // when scaling down, we should scale down older replica sets first. var scalingOperation string switch { case deploymentReplicasToAdd > 0: sort.Sort(controller.ReplicaSetsBySizeNewer(allRSs)) scalingOperation = "up" case deploymentReplicasToAdd < 0: sort.Sort(controller.ReplicaSetsBySizeOlder(allRSs)) scalingOperation = "down" } // Iterate over all active replica sets and estimate proportions for each of them. // The absolute value of deploymentReplicasAdded should never exceed the absolute // value of deploymentReplicasToAdd. deploymentReplicasAdded := int32(0) nameToSize := make(map[string]int32) for i := range allRSs { rs := allRSs[i] // Estimate proportions if we have replicas to add, otherwise simply populate // nameToSize with the current sizes for each replica set. 
if deploymentReplicasToAdd != 0 { proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion deploymentReplicasAdded += proportion } else { nameToSize[rs.Name] = *(rs.Spec.Replicas) } } // Update all replica sets for i := range allRSs { rs := allRSs[i] // Add/remove any leftovers to the largest replica set. if i == 0 && deploymentReplicasToAdd != 0 { leftover := deploymentReplicasToAdd - deploymentReplicasAdded nameToSize[rs.Name] = nameToSize[rs.Name] + leftover if nameToSize[rs.Name] < 0 { nameToSize[rs.Name] = 0 } } // TODO: Use transactions when we have them. if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil { // Return as soon as we fail, the deployment is requeued return err } } } return nil } func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { // No need to scale if *(rs.Spec.Replicas) == newScale { return false, rs, nil } var scalingOperation string if *(rs.Spec.Replicas) < newScale { scalingOperation = "up" } else { scalingOperation = "down" } scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation) return scaled, newRS, err } func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) { objCopy, err := api.Scheme.Copy(rs) if err != nil { return false, nil, err } rsCopy := objCopy.(*extensions.ReplicaSet) sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale // TODO: Do not mutate the replica set here, instead simply compare the annotation and if they mismatch // call SetReplicasAnnotations inside the following if clause. Then we can also move the deep-copy from // above inside the if too. annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) scaled := false if sizeNeedsUpdate || annotationsNeedUpdate { *(rsCopy.Spec.Replicas) = newScale rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) } } return scaled, rs, err } // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old replica sets // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit if diff <= 0 { return nil } sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) var errList []error // TODO: This should be parallelized. 
for i := int32(0); i < diff; i++ { rs := oldRSs[i] // Avoid delete replica set with non-zero replica counts if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration { continue } if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err) errList = append(errList, err) } } return utilerrors.NewAggregate(errList) } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) if reflect.DeepEqual(d.Status, newStatus) { return nil } newDeployment := d newDeployment.Status = newStatus _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets. func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus { availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) unavailableReplicas := totalReplicas - availableReplicas // If unavailableReplicas is negative, then that means the Deployment has more available replicas running than // desired, eg. whenever it scales down. In such a case we should simply default unavailableReplicas to zero. if unavailableReplicas < 0 { unavailableReplicas = 0 } status := extensions.DeploymentStatus{ // TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value. ObservedGeneration: deployment.Generation, Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs), UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}), ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, } // Copy conditions one by one so we won't mutate the original object. conditions := deployment.Status.Conditions for i := range conditions { status.Conditions = append(status.Conditions, conditions[i]) } if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) { minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") deploymentutil.SetDeploymentCondition(&status, *minAvailability) } else { noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") deploymentutil.SetDeploymentCondition(&status, *noMinAvailability) } return status } // isScalingEvent checks whether the provided deployment has been updated with a scaling event // by looking at the desired-replicas annotation in the active replica sets of the deployment. 
func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool, error) { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false) if err != nil { return false, err } allRSs := append(oldRSs, newRS) for _, rs := range controller.FilterActiveReplicaSets(allRSs) { desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) if !ok { continue } if desired != *(d.Spec.Replicas) { return true, nil } } return false, nil }
pkg/controller/deployment/sync.go
1
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.9983899593353271, 0.4649673104286194, 0.0001679071137914434, 0.06117020174860954, 0.48518967628479004 ]
{ "id": 2, "code_window": [ "\n", "// Waits for the deployment to clean up old rcs.\n", "func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n", "\treturn wait.Poll(Poll, 5*time.Minute, func() (bool, error) {\n", "\t\tdeployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})\n", "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar oldRSs []*extensions.ReplicaSet\n", "\tvar d *extensions.Deployment\n", "\n", "\tpollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {\n" ], "file_path": "test/e2e/framework/util.go", "type": "replace", "edit_start_line_idx": 1000 }
All files in this repository are licensed as follows. If you contribute to this repository, it is assumed that you license your contribution under the same license unless you state otherwise. All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply. GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library.
staging/src/k8s.io/client-go/_vendor/github.com/juju/ratelimit/LICENSE
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.00017919567471835762, 0.00017422343080397695, 0.00016598115325905383, 0.0001751892559695989, 0.00000356677946911077 ]
{ "id": 2, "code_window": [ "\n", "// Waits for the deployment to clean up old rcs.\n", "func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n", "\treturn wait.Poll(Poll, 5*time.Minute, func() (bool, error) {\n", "\t\tdeployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})\n", "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar oldRSs []*extensions.ReplicaSet\n", "\tvar d *extensions.Deployment\n", "\n", "\tpollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {\n" ], "file_path": "test/e2e/framework/util.go", "type": "replace", "edit_start_line_idx": 1000 }
{ "swaggerVersion": "1.2", "apiVersion": "policy/v1alpha1", "basePath": "https://10.10.10.10:6443", "resourcePath": "/apis/policy/v1alpha1", "info": { "title": "", "description": "" }, "apis": [ { "path": "/apis/policy/v1alpha1/namespaces/{namespace}/poddisruptionbudgets", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "v1alpha1.PodDisruptionBudgetList", "method": "GET", "summary": "list or watch objects of kind PodDisruptionBudget", "nickname": "listNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "labelSelector", "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "fieldSelector", "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "watch", "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.", "required": false, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "timeoutSeconds", "description": "Timeout for the list/watch call.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudgetList" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf", "application/json;stream=watch", "application/vnd.kubernetes.protobuf;stream=watch" ], "consumes": [ "*/*" ] }, { "type": "v1alpha1.PodDisruptionBudget", "method": "POST", "summary": "create a PodDisruptionBudget", "nickname": "createNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "v1alpha1.PodDisruptionBudget", "paramType": "body", "name": "body", "description": "", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] }, { "type": "unversioned.Status", "method": "DELETE", "summary": "delete collection of PodDisruptionBudget", "nickname": "deletecollectionNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty 
printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "labelSelector", "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "fieldSelector", "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "watch", "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.", "required": false, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "timeoutSeconds", "description": "Timeout for the list/watch call.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "unversioned.Status" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] } ] }, { "path": "/apis/policy/v1alpha1/watch/namespaces/{namespace}/poddisruptionbudgets", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "versioned.Event", "method": "GET", "summary": "watch individual changes to a list of PodDisruptionBudget", "nickname": "watchNamespacedPodDisruptionBudgetList", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "labelSelector", "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "fieldSelector", "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "watch", "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. 
Defaults to changes from the beginning of history.", "required": false, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "timeoutSeconds", "description": "Timeout for the list/watch call.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "versioned.Event" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf", "application/json;stream=watch", "application/vnd.kubernetes.protobuf;stream=watch" ], "consumes": [ "*/*" ] } ] }, { "path": "/apis/policy/v1alpha1/namespaces/{namespace}/poddisruptionbudgets/{name}", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "v1alpha1.PodDisruptionBudget", "method": "GET", "summary": "read the specified PodDisruptionBudget", "nickname": "readNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "export", "description": "Should this value be exported. Export strips fields that a user can not specify.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "exact", "description": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] }, { "type": "v1alpha1.PodDisruptionBudget", "method": "PUT", "summary": "replace the specified PodDisruptionBudget", "nickname": "replaceNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "v1alpha1.PodDisruptionBudget", "paramType": "body", "name": "body", "description": "", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] }, { "type": "v1alpha1.PodDisruptionBudget", "method": "PATCH", "summary": "partially update the specified PodDisruptionBudget", "nickname": "patchNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": 
"If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "unversioned.Patch", "paramType": "body", "name": "body", "description": "", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "application/json-patch+json", "application/merge-patch+json", "application/strategic-merge-patch+json" ] }, { "type": "unversioned.Status", "method": "DELETE", "summary": "delete a PodDisruptionBudget", "nickname": "deleteNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "v1.DeleteOptions", "paramType": "body", "name": "body", "description": "", "required": true, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "gracePeriodSeconds", "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "orphanDependents", "description": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "unversioned.Status" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] } ] }, { "path": "/apis/policy/v1alpha1/watch/namespaces/{namespace}/poddisruptionbudgets/{name}", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "versioned.Event", "method": "GET", "summary": "watch changes to an object of kind PodDisruptionBudget", "nickname": "watchNamespacedPodDisruptionBudget", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "labelSelector", "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "fieldSelector", "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "watch", "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.", "required": false, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "timeoutSeconds", "description": "Timeout for the list/watch call.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "versioned.Event" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf", "application/json;stream=watch", "application/vnd.kubernetes.protobuf;stream=watch" ], "consumes": [ "*/*" ] } ] }, { "path": "/apis/policy/v1alpha1/poddisruptionbudgets", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "v1alpha1.PodDisruptionBudgetList", "method": "GET", "summary": "list or watch objects of kind PodDisruptionBudget", "nickname": "listPodDisruptionBudgetForAllNamespaces", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "labelSelector", "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "fieldSelector", "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "watch", "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. 
Defaults to changes from the beginning of history.", "required": false, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "timeoutSeconds", "description": "Timeout for the list/watch call.", "required": false, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudgetList" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf", "application/json;stream=watch", "application/vnd.kubernetes.protobuf;stream=watch" ], "consumes": [ "*/*" ] } ] }, { "path": "/apis/policy/v1alpha1/watch/poddisruptionbudgets", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "versioned.Event", "method": "GET", "summary": "watch individual changes to a list of PodDisruptionBudget", "nickname": "watchPodDisruptionBudgetListForAllNamespaces", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "labelSelector", "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "fieldSelector", "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", "required": false, "allowMultiple": false }, { "type": "boolean", "paramType": "query", "name": "watch", "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "query", "name": "resourceVersion", "description": "When specified with a watch call, shows changes that occur after that particular version of a resource. 
Defaults to changes from the beginning of history.", "required": false, "allowMultiple": false }, { "type": "integer", "paramType": "query", "name": "timeoutSeconds", "description": "Timeout for the list/watch call.", "required": false, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "versioned.Event" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf", "application/json;stream=watch", "application/vnd.kubernetes.protobuf;stream=watch" ], "consumes": [ "*/*" ] } ] }, { "path": "/apis/policy/v1alpha1/namespaces/{namespace}/poddisruptionbudgets/{name}/status", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "v1alpha1.PodDisruptionBudget", "method": "GET", "summary": "read status of the specified PodDisruptionBudget", "nickname": "readNamespacedPodDisruptionBudgetStatus", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] }, { "type": "v1alpha1.PodDisruptionBudget", "method": "PUT", "summary": "replace status of the specified PodDisruptionBudget", "nickname": "replaceNamespacedPodDisruptionBudgetStatus", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "v1alpha1.PodDisruptionBudget", "paramType": "body", "name": "body", "description": "", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "*/*" ] }, { "type": "v1alpha1.PodDisruptionBudget", "method": "PATCH", "summary": "partially update status of the specified PodDisruptionBudget", "nickname": "patchNamespacedPodDisruptionBudgetStatus", "parameters": [ { "type": "string", "paramType": "query", "name": "pretty", "description": "If 'true', then the output is pretty printed.", "required": false, "allowMultiple": false }, { "type": "unversioned.Patch", "paramType": "body", "name": "body", "description": "", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "namespace", "description": "object name and auth scope, such as for teams and projects", "required": true, "allowMultiple": false }, { "type": "string", "paramType": "path", "name": "name", "description": "name of the PodDisruptionBudget", "required": true, "allowMultiple": 
false } ], "responseMessages": [ { "code": 200, "message": "OK", "responseModel": "v1alpha1.PodDisruptionBudget" } ], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "application/json-patch+json", "application/merge-patch+json", "application/strategic-merge-patch+json" ] } ] }, { "path": "/apis/policy/v1alpha1", "description": "API at /apis/policy/v1alpha1", "operations": [ { "type": "unversioned.APIResourceList", "method": "GET", "summary": "get available resources", "nickname": "getAPIResources", "parameters": [], "produces": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ], "consumes": [ "application/json", "application/yaml", "application/vnd.kubernetes.protobuf" ] } ] } ], "models": { "v1alpha1.PodDisruptionBudgetList": { "id": "v1alpha1.PodDisruptionBudgetList", "description": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", "required": [ "items" ], "properties": { "kind": { "type": "string", "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "apiVersion": { "type": "string", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources" }, "metadata": { "$ref": "unversioned.ListMeta" }, "items": { "type": "array", "items": { "$ref": "v1alpha1.PodDisruptionBudget" } } } }, "unversioned.ListMeta": { "id": "unversioned.ListMeta", "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", "properties": { "selfLink": { "type": "string", "description": "SelfLink is a URL representing this object. Populated by the system. Read-only." }, "resourceVersion": { "type": "string", "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency" } } }, "v1alpha1.PodDisruptionBudget": { "id": "v1alpha1.PodDisruptionBudget", "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", "properties": { "kind": { "type": "string", "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "apiVersion": { "type": "string", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources" }, "metadata": { "$ref": "v1.ObjectMeta" }, "spec": { "$ref": "v1alpha1.PodDisruptionBudgetSpec", "description": "Specification of the desired behavior of the PodDisruptionBudget." }, "status": { "$ref": "v1alpha1.PodDisruptionBudgetStatus", "description": "Most recently observed status of the PodDisruptionBudget." } } }, "v1.ObjectMeta": { "id": "v1.ObjectMeta", "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "properties": { "name": { "type": "string", "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names" }, "generateName": { "type": "string", "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#idempotency" }, "namespace": { "type": "string", "description": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" }, "selfLink": { "type": "string", "description": "SelfLink is a URL representing this object. Populated by the system. Read-only." }, "uid": { "type": "string", "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" }, "resourceVersion": { "type": "string", "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . 
More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#concurrency-control-and-consistency" }, "generation": { "type": "integer", "format": "int64", "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only." }, "creationTimestamp": { "type": "string", "format": "date-time", "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata" }, "deletionTimestamp": { "type": "string", "format": "date-time", "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata" }, "deletionGracePeriodSeconds": { "type": "integer", "format": "int64", "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only." }, "labels": { "type": "object", "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels" }, "annotations": { "type": "object", "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations" }, "ownerReferences": { "type": "array", "items": { "$ref": "v1.OwnerReference" }, "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller." }, "finalizers": { "type": "array", "items": { "type": "string" }, "description": "Must be empty before the object is deleted from the registry. 
Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed." }, "clusterName": { "type": "string", "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request." } } }, "v1.OwnerReference": { "id": "v1.OwnerReference", "description": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", "required": [ "apiVersion", "kind", "name", "uid" ], "properties": { "apiVersion": { "type": "string", "description": "API version of the referent." }, "kind": { "type": "string", "description": "Kind of the referent. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "name": { "type": "string", "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names" }, "uid": { "type": "string", "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids" }, "controller": { "type": "boolean", "description": "If true, this reference points to the managing controller." } } }, "v1alpha1.PodDisruptionBudgetSpec": { "id": "v1alpha1.PodDisruptionBudgetSpec", "description": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", "properties": { "minAvailable": { "type": "string", "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\"." }, "selector": { "$ref": "unversioned.LabelSelector", "description": "Label query over pods whose evictions are managed by the disruption budget." } } }, "unversioned.LabelSelector": { "id": "unversioned.LabelSelector", "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "properties": { "matchLabels": { "type": "object", "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed." }, "matchExpressions": { "type": "array", "items": { "$ref": "unversioned.LabelSelectorRequirement" }, "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed." } } }, "unversioned.LabelSelectorRequirement": { "id": "unversioned.LabelSelectorRequirement", "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", "required": [ "key", "operator" ], "properties": { "key": { "type": "string", "description": "key is the label key that the selector applies to." }, "operator": { "type": "string", "description": "operator represents a key's relationship to a set of values. Valid operators ard In, NotIn, Exists and DoesNotExist." 
}, "values": { "type": "array", "items": { "type": "string" }, "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch." } } }, "v1alpha1.PodDisruptionBudgetStatus": { "id": "v1alpha1.PodDisruptionBudgetStatus", "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", "required": [ "disruptionAllowed", "currentHealthy", "desiredHealthy", "expectedPods" ], "properties": { "disruptionAllowed": { "type": "boolean", "description": "Whether or not a disruption is currently allowed." }, "currentHealthy": { "type": "integer", "format": "int32", "description": "current number of healthy pods" }, "desiredHealthy": { "type": "integer", "format": "int32", "description": "minimum desired number of healthy pods" }, "expectedPods": { "type": "integer", "format": "int32", "description": "total number of pods counted by this disruption budget" } } }, "unversioned.Status": { "id": "unversioned.Status", "description": "Status is a return value for calls that don't return other objects.", "properties": { "kind": { "type": "string", "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "apiVersion": { "type": "string", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources" }, "metadata": { "$ref": "unversioned.ListMeta", "description": "Standard list metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "status": { "type": "string", "description": "Status of the operation. One of: \"Success\" or \"Failure\". More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status" }, "message": { "type": "string", "description": "A human-readable description of the status of this operation." }, "reason": { "type": "string", "description": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it." }, "details": { "$ref": "unversioned.StatusDetails", "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type." }, "code": { "type": "integer", "format": "int32", "description": "Suggested HTTP return code for this status, 0 if not set." } } }, "unversioned.StatusDetails": { "id": "unversioned.StatusDetails", "description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. 
Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "properties": { "name": { "type": "string", "description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described)." }, "group": { "type": "string", "description": "The group attribute of the resource associated with the status StatusReason." }, "kind": { "type": "string", "description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "causes": { "type": "array", "items": { "$ref": "unversioned.StatusCause" }, "description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes." }, "retryAfterSeconds": { "type": "integer", "format": "int32", "description": "If specified, the time in seconds before the operation should be retried." } } }, "unversioned.StatusCause": { "id": "unversioned.StatusCause", "description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", "properties": { "reason": { "type": "string", "description": "A machine-readable description of the cause of the error. If this value is empty there is no information available." }, "message": { "type": "string", "description": "A human-readable description of the cause of the error. This field may be presented as-is to a reader." }, "field": { "type": "string", "description": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"" } } }, "versioned.Event": { "id": "versioned.Event", "required": [ "type", "object" ], "properties": { "type": { "type": "string" }, "object": { "type": "string" } } }, "unversioned.Patch": { "id": "unversioned.Patch", "description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", "properties": {} }, "v1.DeleteOptions": { "id": "v1.DeleteOptions", "description": "DeleteOptions may be provided when deleting an API object", "properties": { "kind": { "type": "string", "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "apiVersion": { "type": "string", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources" }, "gracePeriodSeconds": { "type": "integer", "format": "int64", "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately." }, "preconditions": { "$ref": "v1.Preconditions", "description": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned." }, "orphanDependents": { "type": "boolean", "description": "Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list." } } }, "v1.Preconditions": { "id": "v1.Preconditions", "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", "properties": { "uid": { "$ref": "types.UID", "description": "Specifies the target UID." } } }, "types.UID": { "id": "types.UID", "properties": {} }, "unversioned.APIResourceList": { "id": "unversioned.APIResourceList", "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", "required": [ "groupVersion", "resources" ], "properties": { "kind": { "type": "string", "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds" }, "apiVersion": { "type": "string", "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources" }, "groupVersion": { "type": "string", "description": "groupVersion is the group and version this APIResourceList is for." }, "resources": { "type": "array", "items": { "$ref": "unversioned.APIResource" }, "description": "resources contains the name of the resources and if they are namespaced." } } }, "unversioned.APIResource": { "id": "unversioned.APIResource", "description": "APIResource specifies the name of a resource and whether it is namespaced.", "required": [ "name", "namespaced", "kind" ], "properties": { "name": { "type": "string", "description": "name is the name of the resource." }, "namespaced": { "type": "boolean", "description": "namespaced indicates if a resource is namespaced or not." }, "kind": { "type": "string", "description": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')" } } } } }
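The swagger document above describes the policy/v1alpha1 PodDisruptionBudget endpoints, including the list path and the content types it produces. A hedged sketch of calling the list endpoint with plain net/http follows; the bearer token and the InsecureSkipVerify transport are placeholders for whatever authentication the cluster actually uses, and the base URL is simply the basePath advertised by the spec.

// Illustrative only: issue the GET described by
// /apis/policy/v1alpha1/namespaces/{namespace}/poddisruptionbudgets.
package main

import (
	"crypto/tls"
	"fmt"
	"io"
	"net/http"
)

func main() {
	const base = "https://10.10.10.10:6443" // basePath from the spec above
	url := base + "/apis/policy/v1alpha1/namespaces/default/poddisruptionbudgets?labelSelector=app%3Dnginx"

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/json")      // one of the advertised "produces" types
	req.Header.Set("Authorization", "Bearer <token>") // placeholder credential

	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // demo only; trust the CA in real use
	}}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // a v1alpha1.PodDisruptionBudgetList document
}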
api/swagger-spec/policy_v1alpha1.json
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.00017600350838620216, 0.00017187412595376372, 0.00016544786922167987, 0.00017190266225952655, 0.0000016189562757062959 ]
{ "id": 2, "code_window": [ "\n", "// Waits for the deployment to clean up old rcs.\n", "func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {\n", "\treturn wait.Poll(Poll, 5*time.Minute, func() (bool, error) {\n", "\t\tdeployment, err := c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})\n", "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tvar oldRSs []*extensions.ReplicaSet\n", "\tvar d *extensions.Deployment\n", "\n", "\tpollErr := wait.PollImmediate(Poll, 5*time.Minute, func() (bool, error) {\n" ], "file_path": "test/e2e/framework/util.go", "type": "replace", "edit_start_line_idx": 1000 }
// Copyright © 2015 Steve Francia <[email protected]>. // Copyright 2013 tsuru authors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package mem import ( "bytes" "errors" "io" "os" "path/filepath" "sync" "sync/atomic" ) import "time" const FilePathSeparator = string(filepath.Separator) type File struct { // atomic requires 64-bit alignment for struct field access at int64 readDirCount int64 closed bool readOnly bool fileData *FileData } func NewFileHandle(data *FileData) *File { return &File{fileData: data} } func NewReadOnlyFileHandle(data *FileData) *File { return &File{fileData: data, readOnly: true} } func (f File) Data() *FileData { return f.fileData } type FileData struct { sync.Mutex name string data []byte memDir Dir dir bool mode os.FileMode modtime time.Time } func (d FileData) Name() string { return d.name } func CreateFile(name string) *FileData { return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} } func CreateDir(name string) *FileData { return &FileData{name: name, memDir: &DirMap{}, dir: true} } func ChangeFileName(f *FileData, newname string) { f.name = newname } func SetMode(f *FileData, mode os.FileMode) { f.mode = mode } func SetModTime(f *FileData, mtime time.Time) { f.modtime = mtime } func GetFileInfo(f *FileData) *FileInfo { return &FileInfo{f} } func (f *File) Open() error { atomic.StoreInt64(&f.at, 0) atomic.StoreInt64(&f.readDirCount, 0) f.fileData.Lock() f.closed = false f.fileData.Unlock() return nil } func (f *File) Close() error { f.fileData.Lock() f.closed = true if !f.readOnly { SetModTime(f.fileData, time.Now()) } f.fileData.Unlock() return nil } func (f *File) Name() string { return f.fileData.name } func (f *File) Stat() (os.FileInfo, error) { return &FileInfo{f.fileData}, nil } func (f *File) Sync() error { return nil } func (f *File) Readdir(count int) (res []os.FileInfo, err error) { var outLength int64 f.fileData.Lock() files := f.fileData.memDir.Files()[f.readDirCount:] if count > 0 { if len(files) < count { outLength = int64(len(files)) } else { outLength = int64(count) } if len(files) == 0 { err = io.EOF } } else { outLength = int64(len(files)) } f.readDirCount += outLength f.fileData.Unlock() res = make([]os.FileInfo, outLength) for i := range res { res[i] = &FileInfo{files[i]} } return res, err } func (f *File) Readdirnames(n int) (names []string, err error) { fi, err := f.Readdir(n) names = make([]string, len(fi)) for i, f := range fi { _, names[i] = filepath.Split(f.Name()) } return names, err } func (f *File) Read(b []byte) (n int, err error) { f.fileData.Lock() defer f.fileData.Unlock() if f.closed == true { return 0, ErrFileClosed } if len(b) > 0 && int(f.at) == len(f.fileData.data) { return 0, io.EOF } if len(f.fileData.data)-int(f.at) >= len(b) { n = len(b) } else { n = len(f.fileData.data) - int(f.at) } copy(b, f.fileData.data[f.at:f.at+int64(n)]) atomic.AddInt64(&f.at, int64(n)) return } func (f *File) ReadAt(b []byte, off int64) (n int, err error) { atomic.StoreInt64(&f.at, off) return f.Read(b) } 
func (f *File) Truncate(size int64) error { if f.closed == true { return ErrFileClosed } if f.readOnly { return &os.PathError{"truncate", f.fileData.name, errors.New("file handle is read only")} } if size < 0 { return ErrOutOfRange } if size > int64(len(f.fileData.data)) { diff := size - int64(len(f.fileData.data)) f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...) } else { f.fileData.data = f.fileData.data[0:size] } SetModTime(f.fileData, time.Now()) return nil } func (f *File) Seek(offset int64, whence int) (int64, error) { if f.closed == true { return 0, ErrFileClosed } switch whence { case 0: atomic.StoreInt64(&f.at, offset) case 1: atomic.AddInt64(&f.at, int64(offset)) case 2: atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) } return f.at, nil } func (f *File) Write(b []byte) (n int, err error) { if f.readOnly { return 0, &os.PathError{"write", f.fileData.name, errors.New("file handle is read only")} } n = len(b) cur := atomic.LoadInt64(&f.at) f.fileData.Lock() defer f.fileData.Unlock() diff := cur - int64(len(f.fileData.data)) var tail []byte if n+int(cur) < len(f.fileData.data) { tail = f.fileData.data[n+int(cur):] } if diff > 0 { f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...) f.fileData.data = append(f.fileData.data, tail...) } else { f.fileData.data = append(f.fileData.data[:cur], b...) f.fileData.data = append(f.fileData.data, tail...) } SetModTime(f.fileData, time.Now()) atomic.StoreInt64(&f.at, int64(len(f.fileData.data))) return } func (f *File) WriteAt(b []byte, off int64) (n int, err error) { atomic.StoreInt64(&f.at, off) return f.Write(b) } func (f *File) WriteString(s string) (ret int, err error) { return f.Write([]byte(s)) } func (f *File) Info() *FileInfo { return &FileInfo{f.fileData} } type FileInfo struct { *FileData } // Implements os.FileInfo func (s *FileInfo) Name() string { _, name := filepath.Split(s.name) return name } func (s *FileInfo) Mode() os.FileMode { return s.mode } func (s *FileInfo) ModTime() time.Time { return s.modtime } func (s *FileInfo) IsDir() bool { return s.dir } func (s *FileInfo) Sys() interface{} { return nil } func (s *FileInfo) Size() int64 { if s.IsDir() { return int64(42) } return int64(len(s.data)) } var ( ErrFileClosed = errors.New("File is closed") ErrOutOfRange = errors.New("Out of range") ErrTooLarge = errors.New("Too large") ErrFileNotFound = os.ErrNotExist ErrFileExists = os.ErrExist ErrDestinationExists = os.ErrExist )
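The vendored afero file above exposes a small constructor surface (CreateFile, CreateDir, NewFileHandle, NewReadOnlyFileHandle) around FileData, plus the usual Read/Write/Seek methods on the handle. A short usage sketch, assuming the package is importable as github.com/spf13/afero/mem rather than from this repository's vendor tree:

// Minimal usage sketch for the in-memory file shown above.
package main

import (
	"fmt"

	"github.com/spf13/afero/mem"
)

func main() {
	// CreateFile builds the backing FileData; NewFileHandle wraps it in a
	// handle that tracks its own read/write offset.
	data := mem.CreateFile("/tmp/example.txt")
	f := mem.NewFileHandle(data)

	if _, err := f.Write([]byte("hello, mem")); err != nil {
		panic(err)
	}

	// Rewind and read the contents back through the same handle.
	if _, err := f.Seek(0, 0); err != nil {
		panic(err)
	}
	buf := make([]byte, 10)
	n, err := f.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n]) // read 10 bytes: "hello, mem"

	// A handle created with NewReadOnlyFileHandle would reject the Write above.
	_ = f.Close()
}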
vendor/github.com/spf13/afero/mem/file.go
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.00017870409647002816, 0.00017135894449893385, 0.00016587277059443295, 0.0001712242519715801, 0.0000030136670829961076 ]
{ "id": 4, "code_window": [ "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n", "\t\treturn len(oldRSs) == desiredRSNum, nil\n", "\t})\n", "}\n", "\n", "func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {\n", "\tif newRS != nil {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif pollErr == wait.ErrWaitTimeout {\n", "\t\tpollErr = fmt.Errorf(\"%d old replica sets were not cleaned up for deployment %q\", len(oldRSs)-desiredRSNum, deploymentName)\n", "\t\tlogReplicaSetsOfDeployment(d, oldRSs, nil)\n", "\t}\n", "\treturn pollErr\n" ], "file_path": "test/e2e/framework/util.go", "type": "add", "edit_start_line_idx": 1011 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployment import ( "fmt" "reflect" "sort" "strconv" "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" labelsutil "k8s.io/kubernetes/pkg/util/labels" ) // syncStatusOnly only updates Deployments Status and doesn't take any mutating actions. func (dc *DeploymentController) syncStatusOnly(deployment *extensions.Deployment) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false) if err != nil { return err } allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, deployment) } // sync is responsible for reconciling deployments on scaling events or when they // are paused. func (dc *DeploymentController) sync(deployment *extensions.Deployment) error { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false) if err != nil { return err } if err := dc.scale(deployment, newRS, oldRSs); err != nil { // If we get an error while trying to scale, the deployment will be requeued // so we can abort this resync return err } allRSs := append(oldRSs, newRS) return dc.syncDeploymentStatus(allRSs, newRS, deployment) } // checkPausedConditions checks if the given deployment is paused or not and adds an appropriate condition. // These conditions are needed so that we won't accidentally report lack of progress for resumed deployments // that were paused for longer than progressDeadlineSeconds. func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment) error { if d.Spec.ProgressDeadlineSeconds == nil { return nil } cond := deploymentutil.GetDeploymentCondition(d.Status, extensions.DeploymentProgressing) if cond != nil && cond.Reason == deploymentutil.TimedOutReason { // If we have reported lack of progress, do not overwrite it with a paused condition. 
return nil } pausedCondExists := cond != nil && cond.Reason == deploymentutil.PausedDeployReason needsUpdate := false if d.Spec.Paused && !pausedCondExists { condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } else if !d.Spec.Paused && pausedCondExists { condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") deploymentutil.SetDeploymentCondition(&d.Status, *condition) needsUpdate = true } if !needsUpdate { return nil } var err error d, err = dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) return err } // getAllReplicaSetsAndSyncRevision returns all the replica sets for the provided deployment (new and all old), with new RS's and deployment's revision updated. // 1. Get all old RSes this deployment targets, and calculate the max revision number among them (maxOldV). // 2. Get new RS this deployment targets (whose pod template matches deployment's), and update new RS's revision number to (maxOldV + 1), // only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. // 3. Copy new RS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of replica sets, thus incorrect deployment status. func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *extensions.Deployment, createIfNotExisted bool) (*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { // List the deployment's RSes & Pods and apply pod-template-hash info to deployment's adopted RSes/Pods rsList, podList, err := dc.rsAndPodsWithHashKeySynced(deployment) if err != nil { return nil, nil, fmt.Errorf("error labeling replica sets and pods with pod-template-hash: %v", err) } _, allOldRSs, err := deploymentutil.FindOldReplicaSets(deployment, rsList, podList) if err != nil { return nil, nil, err } // Get new replica set with the updated revision number newRS, err := dc.getNewReplicaSet(deployment, rsList, allOldRSs, createIfNotExisted) if err != nil { return nil, nil, err } return newRS, allOldRSs, nil } // rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced. func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) { rsList, err := deploymentutil.ListReplicaSets(deployment, func(namespace string, options metav1.ListOptions) ([]*extensions.ReplicaSet, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } return dc.rsLister.ReplicaSets(namespace).List(parsed) }) if err != nil { return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err) } syncedRSList := []*extensions.ReplicaSet{} for _, rs := range rsList { // Add pod-template-hash information if it's not in the RS. // Otherwise, new RS produced by Deployment will overlap with pre-existing ones // that aren't constrained by the pod-template-hash. 
syncedRS, err := dc.addHashKeyToRSAndPods(rs) if err != nil { return nil, nil, err } syncedRSList = append(syncedRSList, syncedRS) } syncedPodList, err := dc.listPods(deployment) if err != nil { return nil, nil, err } return syncedRSList, syncedPodList, nil } // addHashKeyToRSAndPods adds pod-template-hash information to the given rs, if it's not already there, with the following steps: // 1. Add hash label to the rs's pod template, and make sure the controller sees this update so that no orphaned pods will be created // 2. Add hash label to all pods this rs owns, wait until replicaset controller reports rs.Status.FullyLabeledReplicas equal to the desired number of replicas // 3. Add hash label to the rs's label and selector func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet) (*extensions.ReplicaSet, error) { // If the rs already has the new hash label in its selector, it's done syncing if labelsutil.SelectorHasLabel(rs.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey) { return rs, nil } hash := deploymentutil.GetReplicaSetHash(rs) // 1. Add hash template label to the rs. This ensures that any newly created pods will have the new label. updatedRS, err := deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its pod template label. if updated.Spec.Template.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated } updated.Spec.Template.Labels = labelsutil.AddLabel(updated.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) return nil }) if err != nil { return nil, fmt.Errorf("error updating replica set %s/%s pod template label with template hash: %v", rs.Namespace, rs.Name, err) } // Make sure rs pod template is updated so that it won't create pods without the new label (orphaned pods). if updatedRS.Generation > updatedRS.Status.ObservedGeneration { if err = deploymentutil.WaitForReplicaSetUpdated(dc.client, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("error waiting for replica set %s/%s to be observed by controller: %v", updatedRS.Namespace, updatedRS.Name, err) } glog.V(4).Infof("Observed the update of replica set %s/%s's pod template with hash %s.", rs.Namespace, rs.Name, hash) } // 2. Update all pods managed by the rs to have the new hash label, so they will be correctly adopted. 
selector, err := metav1.LabelSelectorAsSelector(updatedRS.Spec.Selector) if err != nil { return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err) } options := metav1.ListOptions{LabelSelector: selector.String()} parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } pods, err := dc.podLister.Pods(updatedRS.Namespace).List(parsed) if err != nil { return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", rs.Namespace, options, err) } podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))} for i := range pods { podList.Items = append(podList.Items, *pods[i]) } if err := deploymentutil.LabelPodsWithHash(&podList, dc.client, dc.podLister, rs.Namespace, rs.Name, hash); err != nil { return nil, fmt.Errorf("error in adding template hash label %s to pods %+v: %s", hash, podList, err) } // We need to wait for the replicaset controller to observe the pods being // labeled with pod template hash. Because previously we've called // WaitForReplicaSetUpdated, the replicaset controller should have dropped // FullyLabeledReplicas to 0 already, we only need to wait it to increase // back to the number of replicas in the spec. if err := deploymentutil.WaitForPodsHashPopulated(dc.client, updatedRS.Generation, updatedRS.Namespace, updatedRS.Name); err != nil { return nil, fmt.Errorf("Replica set %s/%s: error waiting for replicaset controller to observe pods being labeled with template hash: %v", updatedRS.Namespace, updatedRS.Name, err) } // 3. Update rs label and selector to include the new hash label // Copy the old selector, so that we can scrub out any orphaned pods updatedRS, err = deploymentutil.UpdateRSWithRetries(dc.client.Extensions().ReplicaSets(rs.Namespace), dc.rsLister, rs.Namespace, rs.Name, func(updated *extensions.ReplicaSet) error { // Precondition: the RS doesn't contain the new hash in its label and selector. if updated.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash && updated.Spec.Selector.MatchLabels[extensions.DefaultDeploymentUniqueLabelKey] == hash { return utilerrors.ErrPreconditionViolated } updated.Labels = labelsutil.AddLabel(updated.Labels, extensions.DefaultDeploymentUniqueLabelKey, hash) updated.Spec.Selector = labelsutil.AddLabelToSelector(updated.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, hash) return nil }) // If the RS isn't actually updated, that's okay, we'll retry in the // next sync loop since its selector isn't updated yet. if err != nil { return nil, fmt.Errorf("error updating ReplicaSet %s/%s label and selector with template hash: %v", updatedRS.Namespace, updatedRS.Name, err) } // TODO: look for orphaned pods and label them in the background somewhere else periodically return updatedRS, nil } func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) { return deploymentutil.ListPods(deployment, func(namespace string, options metav1.ListOptions) (*v1.PodList, error) { parsed, err := labels.Parse(options.LabelSelector) if err != nil { return nil, err } pods, err := dc.podLister.Pods(namespace).List(parsed) result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))} for i := range pods { result.Items = append(result.Items, *pods[i]) } return &result, err }) } // Returns a replica set that matches the intent of the given deployment. Returns nil if the new replica set doesn't exist yet. // 1. 
Get existing new RS (the RS that the given deployment targets, whose pod template is the same as deployment's). // 2. If there's existing new RS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old RSes. // 3. If there's no existing new RS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the pod-template-hash will be added to adopted RSes and pods. func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployment, rsList, oldRSs []*extensions.ReplicaSet, createIfNotExisted bool) (*extensions.ReplicaSet, error) { existingNewRS, err := deploymentutil.FindNewReplicaSet(deployment, rsList) if err != nil { return nil, err } // Calculate the max revision number among all old RSes maxOldRevision := deploymentutil.MaxRevision(oldRSs) // Calculate revision number for this new replica set newRevision := strconv.FormatInt(maxOldRevision+1, 10) // Latest replica set exists. We need to sync its annotations (includes copying all but // annotationsToSkip from the parent deployment, and update revision, desiredReplicas, // and maxReplicas) and also update the revision annotation in the deployment with the // latest revision. if existingNewRS != nil { objCopy, err := api.Scheme.Copy(existingNewRS) if err != nil { return nil, err } rsCopy := objCopy.(*extensions.ReplicaSet) // Set existing new replica set's annotation annotationsUpdated := deploymentutil.SetNewReplicaSetAnnotations(deployment, rsCopy, newRevision, true) minReadySecondsNeedsUpdate := rsCopy.Spec.MinReadySeconds != deployment.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { rsCopy.Spec.MinReadySeconds = deployment.Spec.MinReadySeconds return dc.client.Extensions().ReplicaSets(rsCopy.ObjectMeta.Namespace).Update(rsCopy) } updateConditions := deploymentutil.SetDeploymentRevision(deployment, newRevision) // If no other Progressing condition has been recorded and we need to estimate the progress // of this deployment then it is likely that old users started caring about progress. In that // case we need to take into account the first time we noticed their new replica set. cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing) if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil { msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) updateConditions = true } if updateConditions { if deployment, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment); err != nil { return nil, err } } return rsCopy, nil } if !createIfNotExisted { return nil, nil } // new ReplicaSet does not exist, create one. namespace := deployment.Namespace podTemplateSpecHash := fmt.Sprintf("%d", deploymentutil.GetPodTemplateSpecHash(deployment.Spec.Template)) newRSTemplate := deploymentutil.GetNewReplicaSetTemplate(deployment) newRSTemplate.Labels = labelsutil.CloneAndAddLabel(deployment.Spec.Template.Labels, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Add podTemplateHash label to selector. 
newRSSelector := labelsutil.CloneSelectorAndAddLabel(deployment.Spec.Selector, extensions.DefaultDeploymentUniqueLabelKey, podTemplateSpecHash) // Create new ReplicaSet newRS := extensions.ReplicaSet{ ObjectMeta: metav1.ObjectMeta{ // Make the name deterministic, to ensure idempotence Name: deployment.Name + "-" + podTemplateSpecHash, Namespace: namespace, }, Spec: extensions.ReplicaSetSpec{ Replicas: func(i int32) *int32 { return &i }(0), MinReadySeconds: deployment.Spec.MinReadySeconds, Selector: newRSSelector, Template: newRSTemplate, }, } var trueVar = true controllerRef := &metav1.OwnerReference{ APIVersion: getDeploymentKind().GroupVersion().String(), Kind: getDeploymentKind().Kind, Name: deployment.Name, UID: deployment.UID, Controller: &trueVar, } newRS.OwnerReferences = append(newRS.OwnerReferences, *controllerRef) allRSs := append(oldRSs, &newRS) newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, &newRS) if err != nil { return nil, err } *(newRS.Spec.Replicas) = newReplicasCount // Set new replica set's annotation deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false) createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS) switch { // We may end up hitting this due to a slow cache or a fast resync of the deployment. // TODO: Restore once https://github.com/kubernetes/kubernetes/issues/29735 is fixed // ie. we start using a new hashing algorithm. case errors.IsAlreadyExists(err): return nil, err // return dc.rsLister.ReplicaSets(namespace).Get(newRS.Name) case err != nil: msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err) if deployment.Spec.ProgressDeadlineSeconds != nil { cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *cond) // We don't really care about this error at this point, since we have a bigger issue to report. // TODO: Update the rest of the Deployment status, too. We may need to do this every time we // error out in all other places in the controller so that we let users know that their deployments // have been noticed by the controller, albeit with errors. // TODO: Identify which errors are permanent and switch DeploymentIsFailed to take into account // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 _, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment) } dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) return nil, err } if newReplicasCount > 0 { dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount) } deploymentutil.SetDeploymentRevision(deployment, newRevision) if deployment.Spec.ProgressDeadlineSeconds != nil { msg := fmt.Sprintf("Created new replica set %q", createdRS.Name) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) } _, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment) return createdRS, err } // scale scales proportionally in order to mitigate risk. 
Otherwise, scaling up can increase the size // of the new replica set and scaling down can decrease the sizes of the old ones, both of which would // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet) error { // If there is only one active replica set then we should scale that up to the full count of the // deployment. If there is no active replica set, then we should scale up the newest replica set. if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil { if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) { return nil } _, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment) return err } // If the new replica set is saturated, old replica sets should be fully scaled down. // This case handles replica set adoption during a saturated new replica set. if deploymentutil.IsSaturated(deployment, newRS) { for _, old := range controller.FilterActiveReplicaSets(oldRSs) { if _, _, err := dc.scaleReplicaSetAndRecordEvent(old, 0, deployment); err != nil { return err } } return nil } // There are old replica sets with pods and the new replica set is not saturated. // We need to proportionally scale all replica sets (new and old) in case of a // rolling deployment. if deploymentutil.IsRollingUpdate(deployment) { allRSs := controller.FilterActiveReplicaSets(append(oldRSs, newRS)) allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) allowedSize := int32(0) if *(deployment.Spec.Replicas) > 0 { allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment) } // Number of additional replicas that can be either added or removed from the total // replicas count. These replicas should be distributed proportionally to the active // replica sets. deploymentReplicasToAdd := allowedSize - allRSsReplicas // The additional replicas should be distributed proportionally amongst the active // replica sets from the larger to the smaller in size replica set. Scaling direction // drives what happens in case we are trying to scale replica sets of the same size. // In such a case when scaling up, we should scale up newer replica sets first, and // when scaling down, we should scale down older replica sets first. var scalingOperation string switch { case deploymentReplicasToAdd > 0: sort.Sort(controller.ReplicaSetsBySizeNewer(allRSs)) scalingOperation = "up" case deploymentReplicasToAdd < 0: sort.Sort(controller.ReplicaSetsBySizeOlder(allRSs)) scalingOperation = "down" } // Iterate over all active replica sets and estimate proportions for each of them. // The absolute value of deploymentReplicasAdded should never exceed the absolute // value of deploymentReplicasToAdd. deploymentReplicasAdded := int32(0) nameToSize := make(map[string]int32) for i := range allRSs { rs := allRSs[i] // Estimate proportions if we have replicas to add, otherwise simply populate // nameToSize with the current sizes for each replica set. 
if deploymentReplicasToAdd != 0 { proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion deploymentReplicasAdded += proportion } else { nameToSize[rs.Name] = *(rs.Spec.Replicas) } } // Update all replica sets for i := range allRSs { rs := allRSs[i] // Add/remove any leftovers to the largest replica set. if i == 0 && deploymentReplicasToAdd != 0 { leftover := deploymentReplicasToAdd - deploymentReplicasAdded nameToSize[rs.Name] = nameToSize[rs.Name] + leftover if nameToSize[rs.Name] < 0 { nameToSize[rs.Name] = 0 } } // TODO: Use transactions when we have them. if _, _, err := dc.scaleReplicaSet(rs, nameToSize[rs.Name], deployment, scalingOperation); err != nil { // Return as soon as we fail, the deployment is requeued return err } } } return nil } func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { // No need to scale if *(rs.Spec.Replicas) == newScale { return false, rs, nil } var scalingOperation string if *(rs.Spec.Replicas) < newScale { scalingOperation = "up" } else { scalingOperation = "down" } scaled, newRS, err := dc.scaleReplicaSet(rs, newScale, deployment, scalingOperation) return scaled, newRS, err } func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment, scalingOperation string) (bool, *extensions.ReplicaSet, error) { objCopy, err := api.Scheme.Copy(rs) if err != nil { return false, nil, err } rsCopy := objCopy.(*extensions.ReplicaSet) sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale // TODO: Do not mutate the replica set here, instead simply compare the annotation and if they mismatch // call SetReplicasAnnotations inside the following if clause. Then we can also move the deep-copy from // above inside the if too. annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment)) scaled := false if sizeNeedsUpdate || annotationsNeedUpdate { *(rsCopy.Spec.Replicas) = newScale rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy) if err == nil && sizeNeedsUpdate { scaled = true dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) } } return scaled, rs, err } // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old replica sets // where N=d.Spec.RevisionHistoryLimit. Old replica sets are older versions of the podtemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } diff := int32(len(oldRSs)) - *deployment.Spec.RevisionHistoryLimit if diff <= 0 { return nil } sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs)) var errList []error // TODO: This should be parallelized. 
for i := int32(0); i < diff; i++ { rs := oldRSs[i] // Avoid delete replica set with non-zero replica counts if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration { continue } if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { glog.V(2).Infof("Failed deleting old replica set %v for deployment %v: %v", rs.Name, deployment.Name, err) errList = append(errList, err) } } return utilerrors.NewAggregate(errList) } // syncDeploymentStatus checks if the status is up-to-date and sync it if necessary func (dc *DeploymentController) syncDeploymentStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, d *extensions.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) if reflect.DeepEqual(d.Status, newStatus) { return nil } newDeployment := d newDeployment.Status = newStatus _, err := dc.client.Extensions().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // calculateStatus calculates the latest status for the provided deployment by looking into the provided replica sets. func calculateStatus(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) extensions.DeploymentStatus { availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) unavailableReplicas := totalReplicas - availableReplicas // If unavailableReplicas is negative, then that means the Deployment has more available replicas running than // desired, eg. whenever it scales down. In such a case we should simply default unavailableReplicas to zero. if unavailableReplicas < 0 { unavailableReplicas = 0 } status := extensions.DeploymentStatus{ // TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value. ObservedGeneration: deployment.Generation, Replicas: deploymentutil.GetActualReplicaCountForReplicaSets(allRSs), UpdatedReplicas: deploymentutil.GetActualReplicaCountForReplicaSets([]*extensions.ReplicaSet{newRS}), ReadyReplicas: deploymentutil.GetReadyReplicaCountForReplicaSets(allRSs), AvailableReplicas: availableReplicas, UnavailableReplicas: unavailableReplicas, } // Copy conditions one by one so we won't mutate the original object. conditions := deployment.Status.Conditions for i := range conditions { status.Conditions = append(status.Conditions, conditions[i]) } if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) { minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") deploymentutil.SetDeploymentCondition(&status, *minAvailability) } else { noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") deploymentutil.SetDeploymentCondition(&status, *noMinAvailability) } return status } // isScalingEvent checks whether the provided deployment has been updated with a scaling event // by looking at the desired-replicas annotation in the active replica sets of the deployment. 
func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool, error) { newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(d, false) if err != nil { return false, err } allRSs := append(oldRSs, newRS) for _, rs := range controller.FilterActiveReplicaSets(allRSs) { desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) if !ok { continue } if desired != *(d.Spec.Replicas) { return true, nil } } return false, nil }
pkg/controller/deployment/sync.go
1
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.9976540207862854, 0.395025372505188, 0.00016979774227365851, 0.06047388166189194, 0.45167383551597595 ]
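The `after_edit` hunk in the record above turns a bare `wait.ErrWaitTimeout` from a `wait.PollImmediate` loop into a descriptive error when a deployment's old replica sets fail to disappear; the controller-side cleanup it waits on is `cleanupDeployment` in the file above. The sketch below is a minimal, self-contained illustration of that poll-then-translate-timeout pattern. It is illustrative only: `countOldReplicaSets`, the interval, and the timeout are hypothetical stand-ins, not the e2e framework's actual helpers.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// countOldReplicaSets is a hypothetical stand-in for listing a deployment's
// old replica sets; a real e2e test would query the API server here.
func countOldReplicaSets(deploymentName string) (int, error) {
	return 1, nil // pretend exactly one old replica set remains
}

// waitForOldRSCleanup polls until the number of old replica sets drops to
// desiredRSNum and, on timeout, replaces wait.ErrWaitTimeout with a
// descriptive error -- the same shape as the after_edit hunk above.
func waitForOldRSCleanup(deploymentName string, desiredRSNum int) error {
	lastCount := 0
	pollErr := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		n, err := countOldReplicaSets(deploymentName)
		if err != nil {
			return false, err
		}
		lastCount = n
		return n == desiredRSNum, nil
	})
	if pollErr == wait.ErrWaitTimeout {
		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q",
			lastCount-desiredRSNum, deploymentName)
	}
	return pollErr
}

func main() {
	if err := waitForOldRSCleanup("example-deployment", 1); err != nil {
		fmt.Println("cleanup check failed:", err)
		return
	}
	fmt.Println("old replica sets cleaned up")
}
```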
{ "id": 4, "code_window": [ "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n", "\t\treturn len(oldRSs) == desiredRSNum, nil\n", "\t})\n", "}\n", "\n", "func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {\n", "\tif newRS != nil {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif pollErr == wait.ErrWaitTimeout {\n", "\t\tpollErr = fmt.Errorf(\"%d old replica sets were not cleaned up for deployment %q\", len(oldRSs)-desiredRSNum, deploymentName)\n", "\t\tlogReplicaSetsOfDeployment(d, oldRSs, nil)\n", "\t}\n", "\treturn pollErr\n" ], "file_path": "test/e2e/framework/util.go", "type": "add", "edit_start_line_idx": 1011 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" "k8s.io/kubernetes/test/e2e/framework" ) // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's // phase. Note: the PV is deleted in the AfterEach, not here. func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { // 1. verify that the PV and PVC have binded correctly By("Validating the PV-PVC binding") waitOnPVandPVC(c, ns, pv, pvc) // 2. create the nfs writer pod, test if the write was successful, // then delete the pod and verify that it was deleted By("Checking pod has write access to PersistentVolume") createWaitAndDeletePod(f, c, ns, pvc.Name) // 3. delete the PVC, wait for PV to become "Available" By("Deleting the PVC to invoke the recycler") deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable) } // Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate // PV. Ensure each step succeeds. // Note: the PV is deleted in the AfterEach, not here. // Note: this func is serialized, we wait for each pod to be deleted before creating the // next pod. Adding concurrency is a TODO item. // Note: this func is called recursively when there are more claims than pvs. func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string, pvols pvmap, claims pvcmap) { // 1. verify each PV permits write access to a client pod By("Checking pod has write access to PersistentVolumes") for pvcKey := range claims { pvc, err := c.Core().PersistentVolumeClaims(pvcKey.Namespace).Get(pvcKey.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) if len(pvc.Spec.VolumeName) == 0 { continue // claim is not bound } // sanity test to ensure our maps are in sync _, found := pvols[pvc.Spec.VolumeName] Expect(found).To(BeTrue()) // TODO: currently a serialized test of each PV createWaitAndDeletePod(f, c, pvcKey.Namespace, pvcKey.Name) } // 2. delete each PVC, wait for its bound PV to become "Available" By("Deleting PVCs to invoke recycler") deletePVCandValidatePVGroup(c, ns, pvols, claims) } // Creates a PV, PVC, and ClientPod that will run until killed by test or clean up. 
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig persistentVolumeConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { By("Creating the PV and PVC") pv, pvc := createPVPVC(c, pvConfig, ns, isPrebound) waitOnPVandPVC(c, ns, pv, pvc) By("Creating the Client Pod") clientPod := createClientPod(c, ns, pvc) return clientPod, pv, pvc } // initNFSserverPod wraps volumes.go's startVolumeServer to return a running nfs host pod // commonly used by persistent volume testing func initNFSserverPod(c clientset.Interface, ns string) *v1.Pod { return startVolumeServer(c, VolumeTestConfig{ namespace: ns, prefix: "nfs", serverImage: NfsServerImage, serverPorts: []int{2049}, serverArgs: []string{"-G", "777", "/exports"}, }) } var _ = framework.KubeDescribe("PersistentVolumes [Volume][Serial]", func() { // global vars for the Context()s and It()'s below f := framework.NewDefaultFramework("pv") var c clientset.Interface var ns string BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name }) /////////////////////////////////////////////////////////////////////// // NFS /////////////////////////////////////////////////////////////////////// // Testing configurations of a single a PV/PVC pair, multiple evenly paired PVs/PVCs, // and multiple unevenly paired PV/PVCs framework.KubeDescribe("PersistentVolumes:NFS[Flaky]", func() { var ( nfsServerPod *v1.Pod serverIP string pvConfig persistentVolumeConfig ) BeforeEach(func() { framework.Logf("[BeforeEach] Creating NFS Server Pod") nfsServerPod = initNFSserverPod(c, ns) serverIP = nfsServerPod.Status.PodIP framework.Logf("[BeforeEach] Configuring PersistentVolume") pvConfig = persistentVolumeConfig{ namePrefix: "nfs-", pvSource: v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{ Server: serverIP, Path: "/exports", ReadOnly: false, }, }, } }) AfterEach(func() { deletePodWithWait(f, c, nfsServerPod) }) Context("with Single PV - PVC pairs", func() { var pv *v1.PersistentVolume var pvc *v1.PersistentVolumeClaim // Note: this is the only code where the pv is deleted. AfterEach(func() { framework.Logf("AfterEach: Cleaning up test resources.") pvPvcCleanup(c, ns, pv, pvc) }) // Individual tests follow: // // Create an nfs PV, then a claim that matches the PV, and a pod that // contains the claim. Verify that the PV and PVC bind correctly, and // that the pod can write to the nfs volume. It("should create a non-pre-bound PV and PVC: test write access ", func() { pv, pvc = createPVPVC(c, pvConfig, ns, false) completeTest(f, c, ns, pv, pvc) }) // Create a claim first, then a nfs PV that matches the claim, and a // pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. It("create a PVC and non-pre-bound PV: test write access", func() { pv, pvc = createPVCPV(c, pvConfig, ns, false) completeTest(f, c, ns, pv, pvc) }) // Create a claim first, then a pre-bound nfs PV that matches the claim, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. It("create a PVC and a pre-bound PV: test write access", func() { pv, pvc = createPVCPV(c, pvConfig, ns, true) completeTest(f, c, ns, pv, pvc) }) // Create a nfs PV first, then a pre-bound PVC that matches the PV, // and a pod that contains the claim. Verify that the PV and PVC bind // correctly, and that the pod can write to the nfs volume. 
It("create a PV and a pre-bound PVC: test write access", func() { pv, pvc = createPVPVC(c, pvConfig, ns, true) completeTest(f, c, ns, pv, pvc) }) }) // Create multiple pvs and pvcs, all in the same namespace. The PVs-PVCs are // verified to bind, though it's not known in advanced which PV will bind to // which claim. For each pv-pvc pair create a pod that writes to the nfs mount. // Note: when the number of PVs exceeds the number of PVCs the max binding wait // time will occur for each PV in excess. This is expected but the delta // should be kept small so that the tests aren't unnecessarily slow. // Note: future tests may wish to incorporate the following: // a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods // in different namespaces. Context("with multiple PVs and PVCs all in same ns", func() { // define the maximum number of PVs and PVCs supported by these tests const maxNumPVs = 10 const maxNumPVCs = 10 // create the pv and pvc maps to be reused in the It blocks pvols := make(pvmap, maxNumPVs) claims := make(pvcmap, maxNumPVCs) AfterEach(func() { framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols)) pvPvcMapCleanup(c, ns, pvols, claims) }) // Create 2 PVs and 4 PVCs. // Note: PVs are created before claims and no pre-binding It("should create 2 PVs and 4 PVCs: test write access", func() { numPVs, numPVCs := 2, 4 pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig) waitAndVerifyBinds(c, ns, pvols, claims, true) completeMultiTest(f, c, ns, pvols, claims) }) // Create 3 PVs and 3 PVCs. // Note: PVs are created before claims and no pre-binding It("should create 3 PVs and 3 PVCs: test write access", func() { numPVs, numPVCs := 3, 3 pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig) waitAndVerifyBinds(c, ns, pvols, claims, true) completeMultiTest(f, c, ns, pvols, claims) }) // Create 4 PVs and 2 PVCs. // Note: PVs are created before claims and no pre-binding. It("should create 4 PVs and 2 PVCs: test write access", func() { numPVs, numPVCs := 4, 2 pvols, claims = createPVsPVCs(numPVs, numPVCs, c, ns, pvConfig) waitAndVerifyBinds(c, ns, pvols, claims, true) completeMultiTest(f, c, ns, pvols, claims) }) }) }) /////////////////////////////////////////////////////////////////////// // GCE PD /////////////////////////////////////////////////////////////////////// // Testing configurations of single a PV/PVC pair attached to a GCE PD framework.KubeDescribe("PersistentVolumes:GCEPD", func() { var ( diskName string node types.NodeName err error pv *v1.PersistentVolume pvc *v1.PersistentVolumeClaim clientPod *v1.Pod pvConfig persistentVolumeConfig ) BeforeEach(func() { framework.SkipUnlessProviderIs("gce") By("Initializing Test Spec") if diskName == "" { diskName, err = createPDWithRetry() Expect(err).NotTo(HaveOccurred()) pvConfig = persistentVolumeConfig{ namePrefix: "gce-", pvSource: v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: diskName, FSType: "ext3", ReadOnly: false, }, }, prebind: nil, } } clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, false) node = types.NodeName(clientPod.Spec.NodeName) }) AfterEach(func() { framework.Logf("AfterEach: Cleaning up test resources") if c != nil { deletePodWithWait(f, c, clientPod) pvPvcCleanup(c, ns, pv, pvc) clientPod = nil pvc = nil pv = nil } node, clientPod, pvc, pv = "", nil, nil, nil }) AddCleanupAction(func() { if len(diskName) > 0 { deletePDWithRetry(diskName) } }) // Attach a persistent disk to a pod using a PVC. 
// Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. It("should test that deleting a PVC before the pod does not cause pod deletion to fail on PD detach", func() { By("Deleting the Claim") deletePersistentVolumeClaim(c, pvc.Name, ns) verifyGCEDiskAttached(diskName, node) By("Deleting the Pod") deletePodWithWait(f, c, clientPod) By("Verifying Persistent Disk detach") err = waitForPDDetach(diskName, node) Expect(err).NotTo(HaveOccurred()) }) // Attach a persistent disk to a pod using a PVC. // Delete the PV and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete. It("should test that deleting the PV before the pod does not cause pod deletion to fail on PD detach", func() { By("Deleting the Persistent Volume") deletePersistentVolume(c, pv.Name) verifyGCEDiskAttached(diskName, node) By("Deleting the client pod") deletePodWithWait(f, c, clientPod) By("Verifying Persistent Disk detaches") err = waitForPDDetach(diskName, node) Expect(err).NotTo(HaveOccurred()) }) // Test that a Pod and PVC attached to a GCEPD successfully unmounts and detaches when the encompassing Namespace is deleted. It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk", func() { By("Deleting the Namespace") err := c.Core().Namespaces().Delete(ns, nil) Expect(err).NotTo(HaveOccurred()) err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) Expect(err).NotTo(HaveOccurred()) By("Verifying Persistent Disk detaches") err = waitForPDDetach(diskName, node) Expect(err).NotTo(HaveOccurred()) }) }) })
test/e2e/persistent_volumes.go
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.0019142942037433386, 0.0002586429181974381, 0.00016439976752735674, 0.00017185478645842522, 0.0002977491822093725 ]
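The candidate file above drives its NFS cases from a `v1.PersistentVolumeSource` whose `NFS` field points at the server pod's `/exports` share. As a rough sketch of what such a PersistentVolume object looks like when built directly against the API types imported above: the capacity, access mode, and reclaim policy below are illustrative defaults chosen for the example, not values taken from the e2e framework.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

// makeNFSPV builds a PersistentVolume backed by an NFS export like the one
// served by the test's NFS server pod. Capacity, access mode, and reclaim
// policy are example values, not framework defaults.
func makeNFSPV(serverIP string) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "nfs-",
		},
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{
				v1.ResourceStorage: resource.MustParse("2Gi"),
			},
			AccessModes:                   []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRecycle,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{
					Server:   serverIP,
					Path:     "/exports",
					ReadOnly: false,
				},
			},
		},
	}
}

func main() {
	pv := makeNFSPV("10.0.0.2")
	fmt.Println(pv.Spec.PersistentVolumeSource.NFS.Server)
}
```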
{ "id": 4, "code_window": [ "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n", "\t\treturn len(oldRSs) == desiredRSNum, nil\n", "\t})\n", "}\n", "\n", "func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {\n", "\tif newRS != nil {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif pollErr == wait.ErrWaitTimeout {\n", "\t\tpollErr = fmt.Errorf(\"%d old replica sets were not cleaned up for deployment %q\", len(oldRSs)-desiredRSNum, deploymentName)\n", "\t\tlogReplicaSetsOfDeployment(d, oldRSs, nil)\n", "\t}\n", "\treturn pollErr\n" ], "file_path": "test/e2e/framework/util.go", "type": "add", "edit_start_line_idx": 1011 }
# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)

Package errors provides simple error handling primitives.

`go get github.com/pkg/errors`

The traditional error handling idiom in Go is roughly akin to

```go
if err != nil {
        return err
}
```

which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.

## Adding context to an error

The errors.Wrap function returns a new error that adds context to the original error. For example

```go
_, err := ioutil.ReadAll(r)
if err != nil {
        return errors.Wrap(err, "read failed")
}
```

## Retrieving the cause of an error

Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.

```go
type causer interface {
        Cause() error
}
```

`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:

```go
switch err := errors.Cause(err).(type) {
case *MyError:
        // handle specifically
default:
        // unknown error
}
```

[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).

## Contributing

We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. Before proposing a change, please discuss your change by raising an issue.

## Licence

BSD-2-Clause
vendor/github.com/pkg/errors/README.md
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.0003863152232952416, 0.00020686395873781294, 0.00016483313811477274, 0.000172068685060367, 0.00008037183579290286 ]
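The README above documents `errors.Wrap` and `errors.Cause` separately; the short program below composes the two so the wrap-then-unwrap round trip is visible in one place. It is a usage sketch against the public API the README describes, with `ErrNotFound` and `loadRecord` invented for the example.

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrNotFound is a sentinel error that gets wrapped with context as it
// crosses layers.
var ErrNotFound = errors.New("record not found")

// loadRecord pretends the lookup failed and wraps the failure with context.
func loadRecord(id string) error {
	return errors.Wrap(ErrNotFound, "loadRecord "+id)
}

func main() {
	err := loadRecord("42")

	// The wrapped error keeps its context for logging...
	fmt.Println(err) // loadRecord 42: record not found

	// ...while errors.Cause recovers the original value for comparison.
	if errors.Cause(err) == ErrNotFound {
		fmt.Println("underlying cause is ErrNotFound")
	}
}
```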
{ "id": 4, "code_window": [ "\t\tif err != nil {\n", "\t\t\treturn false, err\n", "\t\t}\n", "\t\treturn len(oldRSs) == desiredRSNum, nil\n", "\t})\n", "}\n", "\n", "func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {\n", "\tif newRS != nil {\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tif pollErr == wait.ErrWaitTimeout {\n", "\t\tpollErr = fmt.Errorf(\"%d old replica sets were not cleaned up for deployment %q\", len(oldRSs)-desiredRSNum, deploymentName)\n", "\t\tlogReplicaSetsOfDeployment(d, oldRSs, nil)\n", "\t}\n", "\treturn pollErr\n" ], "file_path": "test/e2e/framework/util.go", "type": "add", "edit_start_line_idx": 1011 }
// mksyscall.pl syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go // MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT // +build arm,darwin package unix import ( "syscall" "unsafe" ) var _ syscall.Errno // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) { r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) fd = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := Syscall(SYS_BIND, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := Syscall(SYS_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socket(domain int, typ int, proto int) (fd int, err error) { r0, _, e1 := RawSyscall(SYS_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := Syscall6(SYS_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := Syscall6(SYS_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := RawSyscall(SYS_GETPEERNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := RawSyscall(SYS_GETSOCKNAME, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Shutdown(s int, how int) (err error) { _, _, e1 := 
Syscall(SYS_SHUTDOWN, uintptr(s), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := RawSyscall6(SYS_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall6(SYS_RECVFROM, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS_SENDTO, uintptr(s), uintptr(_p0), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags)) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, nevent int, timeout *Timespec) (n int, err error) { r0, _, e1 := Syscall6(SYS_KEVENT, uintptr(kq), uintptr(change), uintptr(nchange), uintptr(event), uintptr(nevent), uintptr(unsafe.Pointer(timeout))) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { _p0 = unsafe.Pointer(&mib[0]) } else { _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) use(_p0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(timeval)), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func futimes(fd int, timeval *[2]Timeval) (err error) { _, _, e1 := Syscall(SYS_FUTIMES, uintptr(fd), uintptr(unsafe.Pointer(timeval)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := Syscall(SYS_FCNTL, 
uintptr(fd), uintptr(cmd), uintptr(arg)) val = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func pipe() (r int, w int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) r = int(r0) w = int(r1) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_ACCESS, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := Syscall(SYS_ADJTIME, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chflags(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_CHFLAGS, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chmod(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_CHMOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_CHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Chroot(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Close(fd int) (err error) { _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Dup(fd int) (nfd int, err error) { r0, _, e1 := Syscall(SYS_DUP, uintptr(fd), 0, 0) nfd = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT func Dup2(from int, to int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(from), uintptr(to), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exchangedata(path1 string, path2 string, options int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path1) if err != nil { return } var _p1 *byte _p1, err = BytePtrFromString(path2) if err != nil { return } _, _, e1 := Syscall(SYS_EXCHANGEDATA, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) use(unsafe.Pointer(_p0)) use(unsafe.Pointer(_p1)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Exit(code int) { Syscall(SYS_EXIT, uintptr(code), 0, 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchflags(fd int, flags int) (err error) { _, _, e1 := Syscall(SYS_FCHFLAGS, uintptr(fd), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Flock(fd int, how int) (err error) { _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := Syscall(SYS_FPATHCONF, uintptr(fd), uintptr(name), 0) val = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := Syscall(SYS_FSTAT64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Ftruncate(fd int, length int64) (err error) { _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { var _p0 unsafe.Pointer if len(buf) > 0 { _p0 = unsafe.Pointer(&buf[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getdtablesize() (size int) { r0, _, 
_ := Syscall(SYS_GETDTABLESIZE, 0, 0, 0) size = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getegid() (egid int) { r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0) egid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Geteuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0) uid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getgid() (gid int) { r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0) gid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0) pgid = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpgrp() (pgrp int) { r0, _, _ := RawSyscall(SYS_GETPGRP, 0, 0, 0) pgrp = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpid() (pid int) { r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0) pid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getppid() (ppid int) { r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0) ppid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getpriority(which int, who int) (prio int, err error) { r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0) prio = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getsid(pid int) (sid int, err error) { r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) sid = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Getuid() (uid int) { r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0) uid = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Issetugid() (tainted bool) { r0, _, _ := RawSyscall(SYS_ISSETUGID, 0, 0, 0) tainted = bool(r0 != 0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Kqueue() (fd int, err error) { r0, _, e1 := Syscall(SYS_KQUEUE, 0, 0, 0) fd = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lchown(path string, uid int, gid int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid)) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Link(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } var _p1 *byte _p1, err = BytePtrFromString(link) if err != nil { return } _, _, e1 := Syscall(SYS_LINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) use(unsafe.Pointer(_p0)) use(unsafe.Pointer(_p1)) if e1 != 0 { err = 
errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Listen(s int, backlog int) (err error) { _, _, e1 := Syscall(SYS_LISTEN, uintptr(s), uintptr(backlog), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Lstat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_LSTAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkdir(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_MKDIR, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mkfifo(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_MKFIFO, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mknod(path string, mode uint32, dev int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_MKNOD, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev)) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mlockall(flags int) (err error) { _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Mprotect(b []byte, prot int) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlock(b []byte) (err error) { var _p0 unsafe.Pointer if len(b) > 0 { _p0 = unsafe.Pointer(&b[0]) } else { _p0 = unsafe.Pointer(&_zero) } _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Munlockall() (err error) { _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Open(path string, mode int, perm uint32) (fd int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm)) use(unsafe.Pointer(_p0)) fd = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pathconf(path string, name int) (val int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if 
err != nil { return } r0, _, e1 := Syscall(SYS_PATHCONF, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0) use(unsafe.Pointer(_p0)) val = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pread(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Pwrite(fd int, p []byte, offset int64) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func read(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Readlink(path string, buf []byte) (n int, err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } var _p1 unsafe.Pointer if len(buf) > 0 { _p1 = unsafe.Pointer(&buf[0]) } else { _p1 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_READLINK, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf))) use(unsafe.Pointer(_p0)) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rename(from string, to string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(from) if err != nil { return } var _p1 *byte _p1, err = BytePtrFromString(to) if err != nil { return } _, _, e1 := Syscall(SYS_RENAME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) use(unsafe.Pointer(_p0)) use(unsafe.Pointer(_p1)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Revoke(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_REVOKE, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Rmdir(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_RMDIR, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence)) newoffset = int64(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY 
THE COMMAND AT THE TOP; DO NOT EDIT func Setegid(egid int) (err error) { _, _, e1 := Syscall(SYS_SETEGID, uintptr(egid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Seteuid(euid int) (err error) { _, _, e1 := RawSyscall(SYS_SETEUID, uintptr(euid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setgid(gid int) (err error) { _, _, e1 := RawSyscall(SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setlogin(name string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(name) if err != nil { return } _, _, e1 := Syscall(SYS_SETLOGIN, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpgid(pid int, pgid int) (err error) { _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setprivexec(flag int) (err error) { _, _, e1 := Syscall(SYS_SETPRIVEXEC, uintptr(flag), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setregid(rgid int, egid int) (err error) { _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setreuid(ruid int, euid int) (err error) { _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(which), uintptr(unsafe.Pointer(lim)), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Settimeofday(tp *Timeval) (err error) { _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Setuid(uid int) (err error) { _, _, e1 := RawSyscall(SYS_SETUID, uintptr(uid), 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Stat(path string, stat *Stat_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_STAT64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Statfs(path string, stat *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_STATFS64, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) 
use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Symlink(path string, link string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } var _p1 *byte _p1, err = BytePtrFromString(link) if err != nil { return } _, _, e1 := Syscall(SYS_SYMLINK, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0) use(unsafe.Pointer(_p0)) use(unsafe.Pointer(_p1)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Sync() (err error) { _, _, e1 := Syscall(SYS_SYNC, 0, 0, 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Truncate(path string, length int64) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Umask(newmask int) (oldmask int) { r0, _, _ := Syscall(SYS_UMASK, uintptr(newmask), 0, 0) oldmask = int(r0) return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Undelete(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_UNDELETE, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unlink(path string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_UNLINK, uintptr(unsafe.Pointer(_p0)), 0, 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func Unmount(path string, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) if err != nil { return } _, _, e1 := Syscall(SYS_UNMOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) use(unsafe.Pointer(_p0)) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { _p0 = unsafe.Pointer(&p[0]) } else { _p0 = unsafe.Pointer(&_zero) } r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p))) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) { r0, _, e1 := Syscall6(SYS_MMAP, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func readlen(fd int, buf *byte, nbuf int) (n int, err error) { r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func writelen(fd int, buf *byte, nbuf int) (n int, err error) { r0, _, e1 := 
Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) n = int(r0) if e1 != 0 { err = errnoErr(e1) } return } // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) usec = int32(r1) if e1 != 0 { err = errnoErr(e1) } return }
staging/src/k8s.io/client-go/_vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
0
https://github.com/kubernetes/kubernetes/commit/ff83eb58ebbe570fdd3d495fdfbd7b6312e97184
[ 0.9635860323905945, 0.007387814112007618, 0.00016860161849763244, 0.00042112101800739765, 0.08024445176124573 ]
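Every wrapper in the generated darwin/arm file above follows the same shape: marshal arguments into uintptr values, invoke Syscall/RawSyscall, and turn a non-zero errno into a Go error via errnoErr. Below is a minimal, hedged sketch of how calling code consumes a few of the exported wrappers. It assumes a darwin build target and the golang.org/x/sys/unix import path; unix.Close is not part of the excerpt above and is used here as an assumption.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// unix.Open is the generated wrapper shown above; on failure err is the
	// Errno produced by errnoErr.
	fd, err := unix.Open("/etc/hosts", unix.O_RDONLY, 0)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	// Assumption: unix.Close exists in the same package but outside the excerpt.
	defer unix.Close(fd)

	var st unix.Stat_t
	if err := unix.Fstat(fd, &st); err != nil {
		fmt.Println("fstat failed:", err)
		return
	}

	buf := make([]byte, 64)
	n, err := unix.Pread(fd, buf, 0)
	if err != nil {
		fmt.Println("pread failed:", err)
		return
	}
	fmt.Printf("pid %d read %d of %d bytes\n", unix.Getpid(), n, st.Size)
}
```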
{ "id": 0, "code_window": [ "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\ttunnelOpenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "pkg/ssh/ssh.go", "type": "add", "edit_start_line_idx": 48 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metrics import ( "sync" "time" compbasemetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) var ( etcdRequestLatency = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "etcd_request_duration_seconds", Help: "Etcd request latency in seconds for each operation and object type.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"operation", "type"}, ) objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "etcd_object_counts", Help: "Number of stored objects at the time of last check split by kind.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"resource"}, ) deprecatedEtcdRequestLatenciesSummary = compbasemetrics.NewSummaryVec( &compbasemetrics.SummaryOpts{ Name: "etcd_request_latencies_summary", Help: "(Deprecated) Etcd request latency summary in microseconds for each operation and object type.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"operation", "type"}, ) ) var registerMetrics sync.Once // Register all metrics. func Register() { // Register the metrics. registerMetrics.Do(func() { legacyregistry.MustRegister(etcdRequestLatency) legacyregistry.MustRegister(objectCounts) // TODO(danielqsj): Remove the following metrics, they are deprecated legacyregistry.MustRegister(deprecatedEtcdRequestLatenciesSummary) }) } // UpdateObjectCount sets the etcd_object_counts metric. func UpdateObjectCount(resourcePrefix string, count int64) { objectCounts.WithLabelValues(resourcePrefix).Set(float64(count)) } // RecordEtcdRequestLatency sets the etcd_request_duration_seconds metrics. func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime)) deprecatedEtcdRequestLatenciesSummary.WithLabelValues(verb, resource).Observe(sinceInMicroseconds(startTime)) } // Reset resets the etcd_request_duration_seconds metric. func Reset() { etcdRequestLatency.Reset() deprecatedEtcdRequestLatenciesSummary.Reset() } // sinceInMicroseconds gets the time since the specified start in microseconds. func sinceInMicroseconds(start time.Time) float64 { return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) } // sinceInSeconds gets the time since the specified start in seconds. func sinceInSeconds(start time.Time) float64 { return time.Since(start).Seconds() }
staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.014210296794772148, 0.0020933724008500576, 0.00016760430298745632, 0.00021730497246608138, 0.004280381370335817 ]
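The etcd3 metrics file above hides its collectors behind small helper functions rather than exporting them. A hedged sketch of a typical call site follows; getPod and doEtcdGet are hypothetical, and only Register and RecordEtcdRequestLatency come from the file itself.

```go
// Hypothetical storage call site; only metrics.Register and
// metrics.RecordEtcdRequestLatency are taken from the file above.
package storage

import (
	"time"

	"k8s.io/apiserver/pkg/storage/etcd3/metrics"
)

func init() {
	// Safe to call from several packages: registration is guarded by sync.Once.
	metrics.Register()
}

func getPod(key string) error {
	startTime := time.Now()
	err := doEtcdGet(key) // hypothetical etcd round trip
	// Observes etcd_request_duration_seconds{operation="get",type="pods"} and
	// the deprecated summary in a single call.
	metrics.RecordEtcdRequestLatency("get", "pods", startTime)
	return err
}

// doEtcdGet is a stub standing in for the real etcd client call.
func doEtcdGet(key string) error { return nil }
```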
{ "id": 0, "code_window": [ "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\ttunnelOpenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "pkg/ssh/ssh.go", "type": "add", "edit_start_line_idx": 48 }
package main import ( "flag" "fmt" "os" "path/filepath" "strings" "text/template" ) func BuildGenerateCommand() *Command { var agouti, noDot, internal bool flagSet := flag.NewFlagSet("generate", flag.ExitOnError) flagSet.BoolVar(&agouti, "agouti", false, "If set, generate will generate a test file for writing Agouti tests") flagSet.BoolVar(&noDot, "nodot", false, "If set, generate will generate a test file that does not . import ginkgo and gomega") flagSet.BoolVar(&internal, "internal", false, "If set, generate will generate a test file that uses the regular package name") return &Command{ Name: "generate", FlagSet: flagSet, UsageCommand: "ginkgo generate <filename(s)>", Usage: []string{ "Generate a test file named filename_test.go", "If the optional <filenames> argument is omitted, a file named after the package in the current directory will be created.", "Accepts the following flags:", }, Command: func(args []string, additionalArgs []string) { generateSpec(args, agouti, noDot, internal) }, } } var specText = `package {{.Package}} import ( {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}} {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}} {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}} ) var _ = Describe("{{.Subject}}", func() { }) ` var agoutiSpecText = `package {{.Package}} import ( {{if .IncludeImports}}. "github.com/onsi/ginkgo"{{end}} {{if .IncludeImports}}. "github.com/onsi/gomega"{{end}} "github.com/sclevine/agouti" . "github.com/sclevine/agouti/matchers" {{if .DotImportPackage}}. "{{.PackageImportPath}}"{{end}} ) var _ = Describe("{{.Subject}}", func() { var page *agouti.Page BeforeEach(func() { var err error page, err = agoutiDriver.NewPage() Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { Expect(page.Destroy()).To(Succeed()) }) }) ` type specData struct { Package string Subject string PackageImportPath string IncludeImports bool DotImportPackage bool } func generateSpec(args []string, agouti, noDot, internal bool) { if len(args) == 0 { err := generateSpecForSubject("", agouti, noDot, internal) if err != nil { fmt.Println(err.Error()) fmt.Println("") os.Exit(1) } fmt.Println("") return } var failed bool for _, arg := range args { err := generateSpecForSubject(arg, agouti, noDot, internal) if err != nil { failed = true fmt.Println(err.Error()) } } fmt.Println("") if failed { os.Exit(1) } } func generateSpecForSubject(subject string, agouti, noDot, internal bool) error { packageName, specFilePrefix, formattedName := getPackageAndFormattedName() if subject != "" { specFilePrefix = formatSubject(subject) formattedName = prettifyPackageName(specFilePrefix) } data := specData{ Package: determinePackageName(packageName, internal), Subject: formattedName, PackageImportPath: getPackageImportPath(), IncludeImports: !noDot, DotImportPackage: !internal, } targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) if fileExists(targetFile) { return fmt.Errorf("%s already exists.", targetFile) } else { fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) } f, err := os.Create(targetFile) if err != nil { return err } defer f.Close() var templateText string if agouti { templateText = agoutiSpecText } else { templateText = specText } specTemplate, err := template.New("spec").Parse(templateText) if err != nil { return err } specTemplate.Execute(f, data) goFmt(targetFile) return nil } func formatSubject(name string) string { name = strings.Replace(name, "-", "_", -1) name = strings.Replace(name, " ", "_", -1) name = strings.Split(name, ".go")[0] 
name = strings.Split(name, "_test")[0] return name } func getPackageImportPath() string { workingDir, err := os.Getwd() if err != nil { panic(err.Error()) } sep := string(filepath.Separator) paths := strings.Split(workingDir, sep+"src"+sep) if len(paths) == 1 { fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") return "UNKNOWN_PACKAGE_PATH" } return filepath.ToSlash(paths[len(paths)-1]) }
vendor/github.com/onsi/ginkgo/ginkgo/generate_command.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00023856284678913653, 0.00017604843014851213, 0.00016890557890292257, 0.00017297212616540492, 0.000015291376257664524 ]
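The ginkgo generate command above works by executing a text/template against a small specData struct and writing the result to a `_test.go` file. The following standalone sketch shows just that mechanism using the standard library; the names mirror the file but the snippet is illustrative, not the ginkgo source.

```go
package main

import (
	"os"
	"text/template"
)

// Mirrors the shape of specData in the file above (trimmed to two fields).
type specData struct {
	Package string
	Subject string
}

const specText = `package {{.Package}}

var _ = Describe("{{.Subject}}", func() {
})
`

func main() {
	// template.Must panics on a parse error, which is acceptable for a fixed literal.
	tmpl := template.Must(template.New("spec").Parse(specText))
	// The real command writes to <subject>_test.go and then runs gofmt on the result;
	// here we just print to stdout.
	_ = tmpl.Execute(os.Stdout, specData{Package: "books_test", Subject: "Books"})
}
```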
{ "id": 0, "code_window": [ "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\ttunnelOpenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "pkg/ssh/ssh.go", "type": "add", "edit_start_line_idx": 48 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package configmap import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" api "k8s.io/kubernetes/pkg/apis/core" ) func TestConfigMapStrategy(t *testing.T) { ctx := genericapirequest.NewDefaultContext() if !Strategy.NamespaceScoped() { t.Errorf("ConfigMap must be namespace scoped") } if Strategy.AllowCreateOnUpdate() { t.Errorf("ConfigMap should not allow create on update") } cfg := &api.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "valid-config-data", Namespace: metav1.NamespaceDefault, }, Data: map[string]string{ "foo": "bar", }, } Strategy.PrepareForCreate(ctx, cfg) errs := Strategy.Validate(ctx, cfg) if len(errs) != 0 { t.Errorf("unexpected error validating %v", errs) } newCfg := &api.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: "valid-config-data-2", Namespace: metav1.NamespaceDefault, ResourceVersion: "4", }, Data: map[string]string{ "invalidKey": "updatedValue", }, } Strategy.PrepareForUpdate(ctx, newCfg, cfg) errs = Strategy.ValidateUpdate(ctx, newCfg, cfg) if len(errs) == 0 { t.Errorf("Expected a validation error") } }
pkg/registry/core/configmap/strategy_test.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00018712533346842974, 0.00017625383043196052, 0.00017026791465468705, 0.0001753850665409118, 0.0000052518753363983706 ]
{ "id": 0, "code_window": [ "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\ttunnelOpenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "pkg/ssh/ssh.go", "type": "add", "edit_start_line_idx": 48 }
{% panel style="success", title="Providing Feedback" %} **Provide feedback at the [survey](https://www.surveymonkey.com/r/CLQBQHR)** {% endpanel %} {% panel style="info", title="TL;DR" %} - Fields set and deleted from Resource Config are merged into Resources by Apply - If a Resource already exists, Apply updates the Resources by merging the local Resource Config into the remote Resources - Fields removed from the Resource Config will be deleted from the remote Resource {% endpanel %} # Merging Fields {% panel style="warning", title="Advanced Section" %} This chapter contains advanced material that readers may want to skip and come back to later. {% endpanel %} ## When are fields merged? This page describes how Resource Config is merged with Resources or other Resource Config. This may occur when: - Applying Resource Config updates to the live Resources in the cluster - Defining Patches in the `kustomization.yaml` which are overlayed on `resources` and [bases](../app_customization/bases_and_variants.md) ### Applying Resource Config Updates Rather than replacing the Resource with the new Resource Config, **Apply will merge the new Resource Config into the live Resource**. This retains values which may be set by the control plane - such as `replicas` values set by auto scalers ### Defining Patches `patches` are sparse Resource Config which **contain a subset of fields that override values defined in other Resource Config** with the same Group/Version/Kind/Namespace/Name. This is used to alter values defined on Resource Config without having to fork it. ## Motivation (Apply) This page describes the semantics for merging Resource Config. Ownership of Resource fields are shared between declarative Resource Config authored by human users, and values set by Controllers running in the cluster. Some fields, such as the `status` and `clusterIp` fields, are owned exclusively by Controllers. Fields, such as the `name` and `namespace` fields, are owned exclusively by the human user managing the Resource. Other fields, such as `replicas`, may be owned by either human users, the apiserver or Controllers. For example, `replicas` may be explicitly set by a user, implicitly set to a default value by the apiserver, or continuously adjusted by a Controller such as and HorizontalPodAutoscaler. {% method %} ### Last Applied Resource Config When Apply creates or updates a Resource, it writes the Resource Config it Applied to an annotation on the Resource. This allows it to compare the last Resource Config it Applied to the current Resource Config and identify fields that have been deleted. {% sample lang="yaml" %} ```yaml # deployment.yaml (Resource Config) apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment spec: selector: matchLabels: app: nginx template: metadata: labels: app: nginx spec: containers: - name: nginx image: nginx:1.7.9 ``` ```yaml # Original Resource Doesn't Exist ``` ```yaml # Applied Resource kind: Deployment metadata: annotations: # ... # This is the deployment.yaml Resource Config written as an annotation on the object # It was written by kubectl apply when the object was created kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"apps/v1","kind":"Deployment", "metadata":{"annotations":{},"name":"nginx-deployment","namespace":"default"}, "spec":{"selector":{"matchLabels":{"app":nginx}},"template":{"metadata":{"labels":{"app":"nginx"}}, "spec":{"containers":[{"image":"nginx:1.7.9","name":"nginx"}]}}}} # ... spec: # ... status: # ... 
``` {% endmethod %} ## Merging Resources Following are the merge semantics for Resources: {% method %} **Adding Fields:** - Fields present in the Resource Config that are missing from the Resource will be added to the Resource. - Fields will be added to the Last Applied Resource Config {% sample lang="yaml" %} ```yaml # deployment.yaml (Resource Config) apiVersion: apps/v1 kind: Deployment metadata: # ... name: nginx-deployment spec: # ... minReadySeconds: 3 ``` ```yaml # Original Resource kind: Deployment metadata: # ... name: nginx-deployment spec: # ... status: # ... ``` ```yaml # Applied Resource kind: Deployment metadata: # ... name: nginx-deployment spec: # ... minReadySeconds: 3 status: # ... ``` {% endmethod %} {% method %} **Updating Fields** - Fields present in the Resource Config that are also present in the Resource will be merged recursively until a primitive field is updated, or a field is added / deleted. - Fields will be updated in the Last Applied Resource Config {% sample lang="yaml" %} ```yaml # deployment.yaml (Resource Config) apiVersion: apps/v1 kind: Deployment metadata: # ... name: nginx-deployment spec: # ... replicas: 2 ``` ```yaml # Original Resource kind: Deployment metadata: # ... name: nginx-deployment spec: # ... # could be defaulted or set by Resource Config replicas: 1 status: # ... ``` ```yaml # Applied Resource kind: Deployment metadata: # ... name: nginx-deployment spec: # ... # updated replicas: 2 status: # ... ``` {% endmethod %} {% method %} **Deleting Fields** - Fields present in the **Last Applied Resource Config** that have been removed from the Resource Config will be deleted from the Resource. - Fields set to *null* in the Resource Config that are present in the Resource Config will be deleted from the Resource. - Fields will be removed from the Last Applied Resource Config {% sample lang="yaml" %} ```yaml # deployment.yaml (Resource Config) apiVersion: apps/v1 kind: Deployment metadata: # ... name: nginx-deployment spec: # ... ``` ```yaml # Original Resource kind: Deployment metadata: # ... name: nginx-deployment # Containers replicas and minReadySeconds kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"apps/v1","kind":"Deployment", "spec":{"replicas": "2", "minReadySeconds": "3", ...}, "metadata": {...}} spec: # ... minReadySeconds: 3 replicas: 2 status: # ... ``` ```yaml # Applied Resource kind: Deployment metadata: # ... name: nginx-deployment kubectl.kubernetes.io/last-applied-configuration: | {"apiVersion":"apps/v1","kind":"Deployment", "spec":{...}, "metadata": {...}} spec: # ... # deleted and then defaulted, but not in Last Applied replicas: 1 # minReadySeconds deleted status: # ... ``` {% endmethod %} {% panel style="danger", title="Removing Fields from Resource Config" %} Simply removing a field from the Resource Config will *not* transfer the ownership to the cluster. Instead it will delete the field from the Resource. If a field is set in the Resource Config and the user wants to give up ownership (e.g. removing `replicas` from the Resource Config and using and autoscaler), the user must first remove it from the last Applied Resource Config stored by the cluster. This can be performed using `kubectl apply edit-last-applied` to delete the `replicas` field from the **Last Applied Resource Config**, and then deleting it from the **Resource Config.** {% endpanel %} ## Field Merge Semantics ### Merging Primitives Primitive fields are merged by replacing the current value with the new value. 
**Field Creation:** Add the primitive field **Field Update:** Change the primitive field value **Field Deletion:** Delete the primitive field | Field in Resource Config | Field in Resource | Field in Last Applied | Action | |---------------------------|-------------------|-----------------------|-----------------------------------------| | Yes | Yes | - | Set live to the Resource Config value. | | Yes | No | - | Set live to the Resource Config value. | | No | - | Yes | Remove from Resource. | | No | - | No | Do nothing. | ### Merging Objects Objects fields are updated by merging the sub-fields recursively (by field name) until a primitive field is found or the field is added / deleted. **Field Creation:** Add the object field **Field Update:** Recursively compare object sub-field values and merge them **Field Deletion:** Delete the object field **Merge Table:** For each field merge Resource Config and Resource values with the same name | Field in Resource Config | Field in Resource | Field in Last Applied | Action | |---------------------------|-------------------|-----------------------|-------------------------------------------| | Yes | Yes | - | Recursively merge the Resource Config and Resource values. | | Yes | No | - | Set live to the Resource Config value. | | No | - | Yes | Remove field from Resource. | | No | - | No | Do nothing. | ### Merging Maps Map fields are updated by merging the elements (by key) until a primitive field is found or the value is added / deleted. **Field Creation:** Add the map field **Field Update:** Recursively compare map values by key and merge them **Field Deletion:** Delete the map field **Merge Table:** For each map element merge Resource Config and Resource values with the same key | Key in Resource Config | Key in Resource | Key in Last Applied | Action | |---------------------------|-------------------|-----------------------|-------------------------------------------| | Yes | Yes | - | Recursively merge the Resource Config and Resource values. | | Yes | No | - | Set live to the Resource Config value. | | No | - | Yes | Remove map element from Resource. | | No | - | No | Do nothing. | ### Merging Lists of Primitives Lists of primitives will be merged if they have a `patch strategy: merge` on the field otherwise they will be replaced. [Finalizer list example](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#objectmeta-v1-meta) **Merge Strategy:** - Merged primitive lists behave like ordered sets - Replace primitive lists are replaced when merged **Ordering:** Uses the ordering specified in the Resource Config. Elements not specified in the Resource Config do not have ordering guarantees with respect to the elements in the Resource Config. **Merge Table:** For each list element merge Resource Config and Resource element with the same value | Element in Resource Config | Element in Resource | Element in Last Applied | Action | |---------------------------|-------------------|-----------------------|-----------------------------------------| | Yes | Yes | - | Do nothing | | Yes | No | - | Add to list. | | No | - | Yes | Remove from list. | | No | - | No | Do nothing. | {% method %} This merge strategy uses the patch merge key to identify container elements in a list and merge them. The `patch merge key` is defined in the [Kubernetes API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#podspec-v1-core) on the field. 
{% sample lang="yaml" %} ```yaml # Last Applied args: ["a", "b"] ``` ```yaml # Resource Config (Local) args: ["a", "c"] ``` ```yaml # Resource (Live) args: ["a", "b", "d"] ``` ```yaml # Applied Resource args: ["a", "c", "d"] ``` {% endmethod %} ### Merging Lists of Objects **Merge Strategy:** Lists of primitives may be merged or replaced. Lists are merged if the list has a `patch strategy` of *merge* and a `patch merge key` on the list field. [Container list example](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#podspec-v1-core). **Merge Key:** The `patch merge key` is used to identify same elements in a list. Unlike map elements (keyed by key) and object fields (keyed by field name), lists don't have a built-in merge identity for elements (index does not define identity). Instead an object field is used as a synthetic *key/value* for merging elements. This fields is the `patch merge key`. List elements with the same patch merge key will be merged when lists are merged. **Ordering:** Uses the ordering specified in the Resource Config. Elements not specified in the Resource Config do not have ordering guarantees. **Merge Table:** For each list element merge Resource Config and Resource element where the elements have the same value for the `patch merge key` | Element in Resource Config | Element in Resource | Element in Last Applied | Action | |---------------------------|-------------------|-----------------------|-----------------------------------------| | Yes | - | - | Recursively merge the Resource Config and Resource values. | | Yes | No | - | Add to list. | | No | - | Yes | Remove from list. | | No | - | No | Do nothing. | {% method %} This merge strategy uses the patch merge key to identify container elements in a list and merge them. The `patch merge key` is defined in the [Kubernetes API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#podspec-v1-core) on the field. {% sample lang="yaml" %} ```yaml # Last Applied Resource Config containers: - name: nginx # key: nginx image: nginx:1.10 - name: nginx-helper-a # key: nginx-helper-a; will be deleted in result image: helper:1.3 - name: nginx-helper-b # key: nginx-helper-b; will be retained image: helper:1.3 ``` ```yaml # Resource Config (Local) containers: - name: nginx image: nginx:1.10 - name: nginx-helper-b image: helper:1.3 - name: nginx-helper-c # key: nginx-helper-c; will be added in result image: helper:1.3 ``` ```yaml # Resource (Live) containers: - name: nginx image: nginx:1.10 - name: nginx-helper-a image: helper:1.3 - name: nginx-helper-b image: helper:1.3 args: ["run"] # Field will be retained - name: nginx-helper-d # key: nginx-helper-d; will be retained image: helper:1.3 ``` ```yaml # Applied Resource containers: - name: nginx image: nginx:1.10 # Element nginx-helper-a was Deleted - name: nginx-helper-b image: helper:1.3 # Field was Ignored args: ["run"] # Element was Added - name: nginx-helper-c image: helper:1.3 # Element was Ignored - name: nginx-helper-d image: helper:1.3 ``` {% endmethod %} {% panel style="info", title="Edit and Set" %} While `kubectl edit` and `kubectl set` ignore the Last Applied Resource Config, Apply will change any values in the Resource Config set by either `kubectl edit` or `kubectl set`. 
To ignore values set by `kubectl edit` or `kubectl set`: - Use `kubectl apply edit-last-applied` to remove the value from the Last Applied (if it is present) - Remove the field from the Resource Config This is the same technique for retaining values set by cluster components such as autoscalers. {% endpanel %}
staging/src/k8s.io/kubectl/docs/book/pages/app_management/field_merge_semantics.md
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017832637240644544, 0.00017081250553019345, 0.00016182484978344291, 0.00017026970454026014, 0.0000038920379665796645 ]
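The list merge-by-key rules described in the kubectl document above are implemented by the strategic-merge-patch helpers in apimachinery. The sketch below shows only that merge-by-key step for containers (a two-way merge); the three-way deletion logic that kubectl apply layers on top via the last-applied annotation is not shown, and the JSON literals are illustrative.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"spec":{"containers":[
		{"name":"nginx","image":"nginx:1.10"},
		{"name":"nginx-helper-a","image":"helper:1.3"}]}}`)

	// The patch only mentions the container it wants to add; elements are
	// matched by the "name" patch merge key, not by list index.
	patch := []byte(`{"spec":{"containers":[
		{"name":"nginx-helper-c","image":"helper:1.3"}]}}`)

	// corev1.Pod carries the patchStrategy/patchMergeKey struct tags the merge uses.
	merged, err := strategicpatch.StrategicMergePatch(original, patch, corev1.Pod{})
	if err != nil {
		panic(err)
	}
	// nginx and nginx-helper-a are retained, nginx-helper-c is appended.
	fmt.Println(string(merged))
}
```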
{ "id": 1, "code_window": [ "const (\n", "\tsubsystem = \"apiserver_audit\"\n", ")\n", "\n", "var (\n", "\teventCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/audit/metrics.go", "type": "add", "edit_start_line_idx": 31 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package audit import ( "fmt" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog" ) const ( subsystem = "apiserver_audit" ) var ( eventCounter = metrics.NewCounter( &metrics.CounterOpts{ Subsystem: subsystem, Name: "event_total", Help: "Counter of audit events generated and sent to the audit backend.", StabilityLevel: metrics.ALPHA, }) errorCounter = metrics.NewCounterVec( &metrics.CounterOpts{ Subsystem: subsystem, Name: "error_total", Help: "Counter of audit events that failed to be audited properly. " + "Plugin identifies the plugin affected by the error.", StabilityLevel: metrics.ALPHA, }, []string{"plugin"}, ) levelCounter = metrics.NewCounterVec( &metrics.CounterOpts{ Subsystem: subsystem, Name: "level_total", Help: "Counter of policy levels for audit events (1 per request).", StabilityLevel: metrics.ALPHA, }, []string{"level"}, ) ApiserverAuditDroppedCounter = metrics.NewCounter( &metrics.CounterOpts{ Subsystem: subsystem, Name: "requests_rejected_total", Help: "Counter of apiserver requests rejected due to an error " + "in audit logging backend.", StabilityLevel: metrics.ALPHA, }, ) ) func init() { legacyregistry.MustRegister(eventCounter) legacyregistry.MustRegister(errorCounter) legacyregistry.MustRegister(levelCounter) legacyregistry.MustRegister(ApiserverAuditDroppedCounter) } // ObserveEvent updates the relevant prometheus metrics for the generated audit event. func ObserveEvent() { eventCounter.Inc() } // ObservePolicyLevel updates the relevant prometheus metrics with the audit level for a request. func ObservePolicyLevel(level auditinternal.Level) { levelCounter.WithLabelValues(string(level)).Inc() } // HandlePluginError handles an error that occurred in an audit plugin. This method should only be // used if the error may have prevented the audit event from being properly recorded. The events are // logged to the debug log. func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Event) { // Count the error. errorCounter.WithLabelValues(plugin).Add(float64(len(impacted))) // Log the audit events to the debug log. msg := fmt.Sprintf("Error in audit plugin '%s' affecting %d audit events: %v\nImpacted events:\n", plugin, len(impacted), err) for _, ev := range impacted { msg = msg + EventString(ev) + "\n" } klog.Error(msg) }
staging/src/k8s.io/apiserver/pkg/audit/metrics.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.9985241293907166, 0.2775106430053711, 0.0001745067274896428, 0.002178717404603958, 0.43429842591285706 ]
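The hunk in this record inserts the ALPHA stability comment immediately before the var block of the audit metrics file shown above. A self-contained sketch of the resulting declaration pattern follows; the package name and the example_total counter are illustrative, not part of the actual commit.

```go
// Sketch of the pattern this commit applies: an ALPHA stability comment ahead
// of the collector declarations, each collector carrying StabilityLevel and
// being registered with legacyregistry.
package exampleaudit

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

/*
 * By default, all the following metrics are defined as falling under
 * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)
 *
 * Promoting the stability level of the metric is a responsibility of the component owner, since it
 * involves explicitly acknowledging support for the metric across multiple releases, in accordance with
 * the metric stability policy.
 */
var (
	// Illustrative counter showing the declaration shape used in the file above.
	exampleCounter = metrics.NewCounter(
		&metrics.CounterOpts{
			Subsystem:      "apiserver_audit",
			Name:           "example_total",
			Help:           "Illustrative counter showing the declaration pattern.",
			StabilityLevel: metrics.ALPHA,
		})
)

func init() {
	legacyregistry.MustRegister(exampleCounter)
}
```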
{ "id": 1, "code_window": [ "const (\n", "\tsubsystem = \"apiserver_audit\"\n", ")\n", "\n", "var (\n", "\teventCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/audit/metrics.go", "type": "add", "edit_start_line_idx": 31 }
package(default_visibility = ["//visibility:public"]) load( "@io_bazel_rules_go//go:def.bzl", "go_library", "go_test", ) go_library( name = "go_default_library", srcs = ["simple.go"], importmap = "k8s.io/kubernetes/vendor/k8s.io/client-go/metadata/fake", importpath = "k8s.io/client-go/metadata/fake", deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/metadata:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", ], ) filegroup( name = "package-srcs", srcs = glob(["**"]), tags = ["automanaged"], visibility = ["//visibility:private"], ) filegroup( name = "all-srcs", srcs = [":package-srcs"], tags = ["automanaged"], ) go_test( name = "go_default_test", srcs = ["simple_test.go"], embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], )
staging/src/k8s.io/client-go/metadata/fake/BUILD
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017561943968757987, 0.00017027191643137485, 0.00016361814050469548, 0.00017058756202459335, 0.000003594393092498649 ]
{ "id": 1, "code_window": [ "const (\n", "\tsubsystem = \"apiserver_audit\"\n", ")\n", "\n", "var (\n", "\teventCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/audit/metrics.go", "type": "add", "edit_start_line_idx": 31 }
apiVersion: v1 conditions: - error: "25" message: "24" status: 谐颋DžSǡƏS$+½H牗洝尿 type: 脽ěĂ凗蓏Ŋ蛊ĉy緅縕 kind: ComponentStatus metadata: annotations: "9": "10" clusterName: "15" creationTimestamp: null deletionGracePeriodSeconds: -4955867275792137171 finalizers: - "14" generateName: "3" generation: 8071137005907523419 labels: "7": "8" managedFields: - apiVersion: "17" fields: "18": "19": null manager: "16" operation: 鐊唊飙Ş-U圴÷a/ɔ}摁(湗Ć] name: "2" namespace: "4" ownerReferences: - apiVersion: "11" blockOwnerDeletion: true controller: false kind: "12" name: "13" uid: Dz廔ȇ{sŊƏp resourceVersion: "16964250748386560239" selfLink: "5" uid: ą飋īqJ枊a8衍`Ĩɘ.蘯6ċV夸e
staging/src/k8s.io/api/testdata/v1.15.0/core.v1.ComponentStatus.yaml
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017522017878945917, 0.00017164171731565148, 0.00016851097461767495, 0.0001714178652036935, 0.0000024679911803104915 ]
{ "id": 1, "code_window": [ "const (\n", "\tsubsystem = \"apiserver_audit\"\n", ")\n", "\n", "var (\n", "\teventCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/audit/metrics.go", "type": "add", "edit_start_line_idx": 31 }
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcproxy import ( "context" "io" "github.com/coreos/etcd/clientv3" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" ) type maintenanceProxy struct { client *clientv3.Client } func NewMaintenanceProxy(c *clientv3.Client) pb.MaintenanceServer { return &maintenanceProxy{ client: c, } } func (mp *maintenanceProxy) Defragment(ctx context.Context, dr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) { conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).Defragment(ctx, dr) } func (mp *maintenanceProxy) Snapshot(sr *pb.SnapshotRequest, stream pb.Maintenance_SnapshotServer) error { conn := mp.client.ActiveConnection() ctx, cancel := context.WithCancel(stream.Context()) defer cancel() ctx = withClientAuthToken(ctx, stream.Context()) sc, err := pb.NewMaintenanceClient(conn).Snapshot(ctx, sr) if err != nil { return err } for { rr, err := sc.Recv() if err != nil { if err == io.EOF { return nil } return err } err = stream.Send(rr) if err != nil { return err } } } func (mp *maintenanceProxy) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) { conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).Hash(ctx, r) } func (mp *maintenanceProxy) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) { conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).HashKV(ctx, r) } func (mp *maintenanceProxy) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).Alarm(ctx, r) } func (mp *maintenanceProxy) Status(ctx context.Context, r *pb.StatusRequest) (*pb.StatusResponse, error) { conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).Status(ctx, r) } func (mp *maintenanceProxy) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) { conn := mp.client.ActiveConnection() return pb.NewMaintenanceClient(conn).MoveLeader(ctx, r) }
vendor/github.com/coreos/etcd/proxy/grpcproxy/maintenance.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017851180746220052, 0.00017193661187775433, 0.00016636984946671873, 0.00017148899496532977, 0.000003471484205874731 ]
{ "id": 2, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var clientCertificateExpirationHistogram = metrics.NewHistogram(\n", "\t&metrics.HistogramOpts{\n", "\t\tNamespace: \"apiserver\",\n", "\t\tSubsystem: \"client\",\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, the following metric is defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go", "type": "add", "edit_start_line_idx": 33 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package audit import ( "fmt" auditinternal "k8s.io/apiserver/pkg/apis/audit" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog" ) const ( subsystem = "apiserver_audit" ) var ( eventCounter = metrics.NewCounter( &metrics.CounterOpts{ Subsystem: subsystem, Name: "event_total", Help: "Counter of audit events generated and sent to the audit backend.", StabilityLevel: metrics.ALPHA, }) errorCounter = metrics.NewCounterVec( &metrics.CounterOpts{ Subsystem: subsystem, Name: "error_total", Help: "Counter of audit events that failed to be audited properly. " + "Plugin identifies the plugin affected by the error.", StabilityLevel: metrics.ALPHA, }, []string{"plugin"}, ) levelCounter = metrics.NewCounterVec( &metrics.CounterOpts{ Subsystem: subsystem, Name: "level_total", Help: "Counter of policy levels for audit events (1 per request).", StabilityLevel: metrics.ALPHA, }, []string{"level"}, ) ApiserverAuditDroppedCounter = metrics.NewCounter( &metrics.CounterOpts{ Subsystem: subsystem, Name: "requests_rejected_total", Help: "Counter of apiserver requests rejected due to an error " + "in audit logging backend.", StabilityLevel: metrics.ALPHA, }, ) ) func init() { legacyregistry.MustRegister(eventCounter) legacyregistry.MustRegister(errorCounter) legacyregistry.MustRegister(levelCounter) legacyregistry.MustRegister(ApiserverAuditDroppedCounter) } // ObserveEvent updates the relevant prometheus metrics for the generated audit event. func ObserveEvent() { eventCounter.Inc() } // ObservePolicyLevel updates the relevant prometheus metrics with the audit level for a request. func ObservePolicyLevel(level auditinternal.Level) { levelCounter.WithLabelValues(string(level)).Inc() } // HandlePluginError handles an error that occurred in an audit plugin. This method should only be // used if the error may have prevented the audit event from being properly recorded. The events are // logged to the debug log. func HandlePluginError(plugin string, err error, impacted ...*auditinternal.Event) { // Count the error. errorCounter.WithLabelValues(plugin).Add(float64(len(impacted))) // Log the audit events to the debug log. msg := fmt.Sprintf("Error in audit plugin '%s' affecting %d audit events: %v\nImpacted events:\n", plugin, len(impacted), err) for _, ev := range impacted { msg = msg + EventString(ev) + "\n" } klog.Error(msg) }
staging/src/k8s.io/apiserver/pkg/audit/metrics.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.009961288422346115, 0.0025851742830127478, 0.00016517832409590483, 0.00022046944650355726, 0.0032118787057697773 ]
{ "id": 2, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var clientCertificateExpirationHistogram = metrics.NewHistogram(\n", "\t&metrics.HistogramOpts{\n", "\t\tNamespace: \"apiserver\",\n", "\t\tSubsystem: \"client\",\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, the following metric is defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go", "type": "add", "edit_start_line_idx": 33 }
# Go gRPC Interceptors for Prometheus monitoring [![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus) [![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus) [![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus) [![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge) [![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus) [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) [Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients. A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus). ## Interceptors [gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement common patterns: auth, logging and... monitoring. To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware). ## Usage There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both. ### Server-side ```go import "github.com/grpc-ecosystem/go-grpc-prometheus" ... // Initialize your gRPC server's interceptor. myServer := grpc.NewServer( grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor), grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor), ) // Register your gRPC service implementations. myservice.RegisterMyServiceServer(s.server, &myServiceImpl{}) // After all your registrations, make sure all of the Prometheus metrics are initialized. grpc_prometheus.Register(myServer) // Register Prometheus metrics handler. http.Handle("/metrics", promhttp.Handler()) ... ``` ### Client-side ```go import "github.com/grpc-ecosystem/go-grpc-prometheus" ... clientConn, err = grpc.Dial( address, grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor) ) client = pb_testproto.NewTestServiceClient(clientConn) resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"}) ... ``` # Metrics ## Labels All server-side metrics start with `grpc_server` as Prometheus subsystem name. All client-side metrics start with `grpc_client`. Both of them have mirror-concepts. Similarly all methods contain the same rich labels: * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and the `grpc_service` section name. E.g. for `package = mwitkow.testproto` and `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"` * `grpc_method` - the name of the method called on the gRPC service. E.g. `grpc_method="Ping"` * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle). 
Differentiating between the two is important especially for latency measurements. - `unary` is single request, single response RPC - `client_stream` is a multi-request, single response RPC - `server_stream` is a single request, multi-response RPC - `bidi_stream` is a multi-request, multi-response RPC Additionally for completed RPCs, the following labels are used: * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go). The list of all statuses is to long, but here are some common ones: - `OK` - means the RPC was successful - `IllegalArgument` - RPC contained bad values - `Internal` - server-side error not disclosed to the clients ## Counters The counters and their up to date documentation is in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go) the respective Prometheus handler (usually `/metrics`). For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones contain mirror concepts. For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto), calling the method `PingList`. The call succeeds and returns 20 messages in the stream. First, immediately after the server receives the call it will increment the `grpc_server_started_total` and start the handling time clock (if histograms are enabled). ```jsoniq grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 ``` Then the user logic gets invoked. It receives one message from the client containing the request (it's a `server_stream`): ```jsoniq grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 ``` The user logic may return an error, or send multiple messages back to the client. In this case, on each of the 20 messages sent back, a counter will be incremented: ```jsoniq grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20 ``` After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go)) and the relevant call labels increment the `grpc_server_handled_total` counter. ```jsoniq grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 ``` ## Histograms [Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way to measure latency distributions of your RPCs. However, since it is bad practice to have metrics of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels) the latency monitoring metrics are disabled by default. To enable them please call the following in your server initialization code: ```jsoniq grpc_prometheus.EnableHandlingTimeHistogram() ``` After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram) variable `grpc_server_handling_seconds`. 
The histogram variable contains three sub-metrics: * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for calculating average handling times * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/)) The counter values will look as follows: ```jsoniq grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1 grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1 grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001 grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1 ``` ## Useful query examples Prometheus philosophy is to provide raw metrics to the monitoring system, and let the aggregations be handled there. The verbosity of above metrics make it possible to have that flexibility. Here's a couple of useful monitoring queries: ### request inbound rate ```jsoniq sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service) ``` For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together. 
### unary request error rate ```jsoniq sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service) ``` For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the ones that didn't finish with `OK` code. ### unary request error percentage ```jsoniq sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service) / sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service) * 100.0 ``` For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that this is a combination of the two above examples. This is an example of a query you would like to [alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g. "no more than 1% requests should fail". ### average response stream size ```jsoniq sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service) / sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service) ``` For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all ` server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows you to track when clients started to send "wide" queries that ret Note the divisor is the number of started RPCs, in order to account for in-flight requests. ### 99%-tile latency of unary requests ```jsoniq histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le) ) ``` For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles) of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile estimation will take samples in a rolling `5m` window. When combined with other quantiles (e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system (e.g. impact of caching). ### percentage of slow unary queries (>250ms) ```jsoniq 100.0 - ( sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service) / sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service) ) * 100.0 ``` For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal) buckets, meaning that counting "fast" requests fractions is easier. However, simple maths helps. This is an example of a query you would like to alert on in your system for SLA violations, e.g. "less than 1% of requests are slower than 250ms". ## Status This code has been used since August 2015 as the basis for monitoring of *production* gRPC micro services at [Improbable](https://improbable.io). ## License `go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0003564192447811365, 0.00017763303185347468, 0.0001607565936865285, 0.0001660026900935918, 0.00003899775765603408 ]
{ "id": 2, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var clientCertificateExpirationHistogram = metrics.NewHistogram(\n", "\t&metrics.HistogramOpts{\n", "\t\tNamespace: \"apiserver\",\n", "\t\tSubsystem: \"client\",\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, the following metric is defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go", "type": "add", "edit_start_line_idx": 33 }
// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x // +build darwin dragonfly freebsd linux netbsd openbsd package socket import "unsafe" func (v *iovec) set(b []byte) { l := len(b) if l == 0 { return } v.Base = (*byte)(unsafe.Pointer(&b[0])) v.Len = uint64(l) }
vendor/golang.org/x/net/internal/socket/iovec_64bit.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0001775933342287317, 0.00017713419219944626, 0.00017667505017016083, 0.00017713419219944626, 4.591420292854309e-7 ]
{ "id": 2, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var clientCertificateExpirationHistogram = metrics.NewHistogram(\n", "\t&metrics.HistogramOpts{\n", "\t\tNamespace: \"apiserver\",\n", "\t\tSubsystem: \"client\",\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, the following metric is defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/authentication/request/x509/x509.go", "type": "add", "edit_start_line_idx": 33 }
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package config import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" componentbaseconfig "k8s.io/component-base/config" ) const ( // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") SchedulerDefaultLockObjectName = "kube-scheduler" // SchedulerPolicyConfigMapKey defines the key of the element in the // scheduler's policy ConfigMap that contains scheduler's policy config. SchedulerPolicyConfigMapKey = "policy.cfg" // SchedulerDefaultProviderName defines the default provider names SchedulerDefaultProviderName = "DefaultProvider" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KubeSchedulerConfiguration configures a scheduler type KubeSchedulerConfiguration struct { metav1.TypeMeta // SchedulerName is name of the scheduler, used to select which pods // will be processed by this scheduler, based on pod's "spec.SchedulerName". SchedulerName string // AlgorithmSource specifies the scheduler algorithm source. AlgorithmSource SchedulerAlgorithmSource // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule // corresponding to every RequiredDuringScheduling affinity rule. // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 0-100. HardPodAffinitySymmetricWeight int32 // LeaderElection defines the configuration of leader election client. LeaderElection KubeSchedulerLeaderElectionConfiguration // ClientConnection specifies the kubeconfig file and client connection // settings for the proxy server to use when communicating with the apiserver. ClientConnection componentbaseconfig.ClientConnectionConfiguration // HealthzBindAddress is the IP address and port for the health check server to serve on, // defaulting to 0.0.0.0:10251 HealthzBindAddress string // MetricsBindAddress is the IP address and port for the metrics server to // serve on, defaulting to 0.0.0.0:10251. MetricsBindAddress string // DebuggingConfiguration holds configuration for Debugging related features // TODO: We might wanna make this a substruct like Debugging componentbaseconfig.DebuggingConfiguration componentbaseconfig.DebuggingConfiguration // DisablePreemption disables the pod preemption feature. DisablePreemption bool // PercentageOfNodeToScore is the percentage of all nodes that once found feasible // for running a pod, the scheduler stops its search for more feasible nodes in // the cluster. This helps improve scheduler's performance. Scheduler always tries to find // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. 
// Example: if the cluster size is 500 nodes and the value of this flag is 30, // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the // nodes will be scored. PercentageOfNodesToScore int32 // Duration to wait for a binding operation to complete before timing out // Value must be non-negative integer. The value zero indicates no waiting. // If this value is nil, the default value will be used. BindTimeoutSeconds *int64 // Plugins specify the set of plugins that should be enabled or disabled. Enabled plugins are the // ones that should be enabled in addition to the default plugins. Disabled plugins are any of the // default plugins that should be disabled. // When no enabled or disabled plugin is specified for an extension point, default plugins for // that extension point will be used if there is any. Plugins *Plugins // PluginConfig is an optional set of custom plugin arguments for each plugin. // Omitting config args for a plugin is equivalent to using the default config for that plugin. PluginConfig []PluginConfig } // SchedulerAlgorithmSource is the source of a scheduler algorithm. One source // field must be specified, and source fields are mutually exclusive. type SchedulerAlgorithmSource struct { // Policy is a policy based algorithm source. Policy *SchedulerPolicySource // Provider is the name of a scheduling algorithm provider to use. Provider *string } // SchedulerPolicySource configures a means to obtain a scheduler Policy. One // source field must be specified, and source fields are mutually exclusive. type SchedulerPolicySource struct { // File is a file policy source. File *SchedulerPolicyFileSource // ConfigMap is a config map policy source. ConfigMap *SchedulerPolicyConfigMapSource } // SchedulerPolicyFileSource is a policy serialized to disk and accessed via // path. type SchedulerPolicyFileSource struct { // Path is the location of a serialized policy. Path string } // SchedulerPolicyConfigMapSource is a policy serialized into a config map value // under the SchedulerPolicyConfigMapKey key. type SchedulerPolicyConfigMapSource struct { // Namespace is the namespace of the policy config map. Namespace string // Name is the name of hte policy config map. Name string } // KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration // to include scheduler specific configuration. type KubeSchedulerLeaderElectionConfiguration struct { componentbaseconfig.LeaderElectionConfiguration } // Plugins include multiple extension points. When specified, the list of plugins for // a particular extension point are the only ones enabled. If an extension point is // omitted from the config, then the default set of plugins is used for that extension point. // Enabled plugins are called in the order specified here, after default plugins. If they need to // be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. type Plugins struct { // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. QueueSort *PluginSet // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. PreFilter *PluginSet // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. Filter *PluginSet // PostFilter is a list of plugins that are invoked after filtering out infeasible nodes. 
PostFilter *PluginSet // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. Score *PluginSet // Reserve is a list of plugins invoked when reserving a node to run the pod. Reserve *PluginSet // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. Permit *PluginSet // PreBind is a list of plugins that should be invoked before a pod is bound. PreBind *PluginSet // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. Bind *PluginSet // PostBind is a list of plugins that should be invoked after a pod is successfully bound. PostBind *PluginSet // Unreserve is a list of plugins invoked when a pod that was previously reserved is rejected in a later phase. Unreserve *PluginSet } // PluginSet specifies enabled and disabled plugins for an extension point. // If an array is empty, missing, or nil, default plugins at that extension point will be used. type PluginSet struct { // Enabled specifies plugins that should be enabled in addition to default plugins. // These are called after default plugins and in the same order specified here. Enabled []Plugin // Disabled specifies default plugins that should be disabled. // When all default plugins need to be disabled, an array containing only one "*" should be provided. Disabled []Plugin } // Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. type Plugin struct { // Name defines the name of plugin Name string // Weight defines the weight of plugin, only used for Score plugins. Weight int32 } // PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. // A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. // It is up to the plugin to process these Args. type PluginConfig struct { // Name defines the name of plugin being configured Name string // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. Args runtime.Unknown }
pkg/scheduler/apis/config/types.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.007467641495168209, 0.0006374624790623784, 0.00016218678501900285, 0.00017659217701293528, 0.0015551383839920163 ]
{ "id": 3, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\tauthenticatedUserCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go", "type": "add", "edit_start_line_idx": 34 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metrics import ( "bufio" "net" "net/http" "net/url" "regexp" "strconv" "strings" "sync" "time" restful "github.com/emicklei/go-restful" "github.com/prometheus/client_golang/prometheus" "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/types" utilnet "k8s.io/apimachinery/pkg/util/net" utilsets "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/features" utilfeature "k8s.io/apiserver/pkg/util/feature" compbasemetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) // resettableCollector is the interface implemented by prometheus.MetricVec // that can be used by Prometheus to collect metrics and reset their values. type resettableCollector interface { compbasemetrics.Registerable Reset() } const ( APIServerComponent string = "apiserver" ) var ( // TODO(a-robinson): Add unit tests for the handling of these metrics once // the upstream library supports it. requestCounter = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Name: "apiserver_request_total", Help: "Counter of apiserver requests broken out for each verb, dry run value, group, version, resource, scope, component, client, and HTTP response contentType and code.", StabilityLevel: compbasemetrics.ALPHA, }, // The label_name contentType doesn't follow the label_name convention defined here: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/instrumentation.md // But changing it would break backwards compatibility. Future label_names // should be all lowercase and separated by underscores. []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component", "client", "contentType", "code"}, ) deprecatedRequestCounter = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Name: "apiserver_request_count", Help: "(Deprecated) Counter of apiserver requests broken out for each verb, group, version, resource, scope, component, client, and HTTP response contentType and code.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component", "client", "contentType", "code"}, ) longRunningRequestGauge = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_longrunning_gauge", Help: "Gauge of all active long-running apiserver requests broken out by verb, group, version, resource, scope and component. 
Not all requests are tracked this way.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) requestLatencies = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_request_duration_seconds", Help: "Response latency distribution in seconds for each verb, dry run value, group, version, resource, subresource, scope and component.", // This metric is used for verifying api call latencies SLO, // as well as tracking regressions in this aspects. // Thus we customize buckets significantly, to empower both usecases. Buckets: []float64{0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.25, 1.5, 1.75, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5, 6, 7, 8, 9, 10, 15, 20, 25, 30, 40, 50, 60}, StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "dry_run", "group", "version", "resource", "subresource", "scope", "component"}, ) deprecatedRequestLatencies = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_request_latencies", Help: "(Deprecated) Response latency distribution in microseconds for each verb, group, version, resource, subresource, scope and component.", // Use buckets ranging from 125 ms to 8 seconds. Buckets: prometheus.ExponentialBuckets(125000, 2.0, 7), StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) deprecatedRequestLatenciesSummary = compbasemetrics.NewSummaryVec( &compbasemetrics.SummaryOpts{ Name: "apiserver_request_latencies_summary", Help: "(Deprecated) Response latency summary in microseconds for each verb, group, version, resource, subresource, scope and component.", // Make the sliding window of 5h. // TODO: The value for this should be based on our SLI definition (medium term). MaxAge: 5 * time.Hour, StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) responseSizes = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_response_sizes", Help: "Response size distribution in bytes for each group, version, verb, resource, subresource, scope and component.", // Use buckets ranging from 1000 bytes (1KB) to 10^9 bytes (1GB). Buckets: prometheus.ExponentialBuckets(1000, 10.0, 7), StabilityLevel: compbasemetrics.ALPHA, }, []string{"verb", "group", "version", "resource", "subresource", "scope", "component"}, ) // DroppedRequests is a number of requests dropped with 'Try again later' response" DroppedRequests = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Name: "apiserver_dropped_requests_total", Help: "Number of requests dropped with 'Try again later' response", StabilityLevel: compbasemetrics.ALPHA, }, []string{"requestKind"}, ) DeprecatedDroppedRequests = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Name: "apiserver_dropped_requests", Help: "(Deprecated) Number of requests dropped with 'Try again later' response", StabilityLevel: compbasemetrics.ALPHA, }, []string{"requestKind"}, ) // RegisteredWatchers is a number of currently registered watchers splitted by resource. 
RegisteredWatchers = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_registered_watchers", Help: "Number of currently registered watchers for a given resources", StabilityLevel: compbasemetrics.ALPHA, }, []string{"group", "version", "kind"}, ) WatchEvents = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ Name: "apiserver_watch_events_total", Help: "Number of events sent in watch clients", StabilityLevel: compbasemetrics.ALPHA, }, []string{"group", "version", "kind"}, ) WatchEventsSizes = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "apiserver_watch_events_sizes", Help: "Watch event size distribution in bytes", Buckets: prometheus.ExponentialBuckets(1024, 2.0, 8), // 1K, 2K, 4K, 8K, ..., 128K. StabilityLevel: compbasemetrics.ALPHA, }, []string{"group", "version", "kind"}, ) // Because of volatality of the base metric this is pre-aggregated one. Instead of reporing current usage all the time // it reports maximal usage during the last second. currentInflightRequests = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "apiserver_current_inflight_requests", Help: "Maximal number of currently used inflight request limit of this apiserver per request kind in last second.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"requestKind"}, ) kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`) metrics = []resettableCollector{ requestCounter, deprecatedRequestCounter, longRunningRequestGauge, requestLatencies, deprecatedRequestLatencies, deprecatedRequestLatenciesSummary, responseSizes, DroppedRequests, DeprecatedDroppedRequests, RegisteredWatchers, WatchEvents, WatchEventsSizes, currentInflightRequests, } ) const ( // ReadOnlyKind is a string identifying read only request kind ReadOnlyKind = "readOnly" // MutatingKind is a string identifying mutating request kind MutatingKind = "mutating" ) var registerMetrics sync.Once // Register all metrics. func Register() { registerMetrics.Do(func() { for _, metric := range metrics { legacyregistry.MustRegister(metric) } }) } // Reset all metrics. func Reset() { for _, metric := range metrics { metric.Reset() } } func UpdateInflightRequestMetrics(nonmutating, mutating int) { currentInflightRequests.WithLabelValues(ReadOnlyKind).Set(float64(nonmutating)) currentInflightRequests.WithLabelValues(MutatingKind).Set(float64(mutating)) } // Record records a single request to the standard metrics endpoints. For use by handlers that perform their own // processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if // you already have a RequestInfo object. func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) { if requestInfo == nil { requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } scope := CleanScope(requestInfo) // We don't use verb from <requestInfo>, as for the healthy path // MonitorRequest is called from InstrumentRouteFunc which is registered // in installer.go with predefined list of verbs (different than those // translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. 
verb := canonicalVerb(strings.ToUpper(req.Method), scope) if requestInfo.IsResourceRequest { MonitorRequest(req, verb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed) } else { MonitorRequest(req, verb, "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed) } } // RecordLongRunning tracks the execution of a long running request against the API server. It provides an accurate count // of the total number of open long running requests. requestInfo may be nil if the caller is not in the normal request flow. func RecordLongRunning(req *http.Request, requestInfo *request.RequestInfo, component string, fn func()) { if requestInfo == nil { requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path} } var g compbasemetrics.GaugeMetric scope := CleanScope(requestInfo) // We don't use verb from <requestInfo>, as for the healthy path // MonitorRequest is called from InstrumentRouteFunc which is registered // in installer.go with predefined list of verbs (different than those // translated to RequestInfo). // However, we need to tweak it e.g. to differentiate GET from LIST. reportedVerb := cleanVerb(canonicalVerb(strings.ToUpper(req.Method), scope), req) if requestInfo.IsResourceRequest { g = longRunningRequestGauge.WithLabelValues(reportedVerb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component) } else { g = longRunningRequestGauge.WithLabelValues(reportedVerb, "", "", "", requestInfo.Path, scope, component) } g.Inc() defer g.Dec() fn() } // MonitorRequest handles standard transformations for client and the reported verb and then invokes Monitor to record // a request. verb must be uppercase to be backwards compatible with existing monitoring tooling. func MonitorRequest(req *http.Request, verb, group, version, resource, subresource, scope, component, contentType string, httpCode, respSize int, elapsed time.Duration) { reportedVerb := cleanVerb(verb, req) dryRun := cleanDryRun(req.URL) client := cleanUserAgent(utilnet.GetHTTPClient(req)) elapsedMicroseconds := float64(elapsed / time.Microsecond) elapsedSeconds := elapsed.Seconds() requestCounter.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc() deprecatedRequestCounter.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component, client, contentType, codeToString(httpCode)).Inc() requestLatencies.WithLabelValues(reportedVerb, dryRun, group, version, resource, subresource, scope, component).Observe(elapsedSeconds) deprecatedRequestLatencies.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(elapsedMicroseconds) deprecatedRequestLatenciesSummary.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(elapsedMicroseconds) // We are only interested in response sizes of read requests. if verb == "GET" || verb == "LIST" { responseSizes.WithLabelValues(reportedVerb, group, version, resource, subresource, scope, component).Observe(float64(respSize)) } } // InstrumentRouteFunc works like Prometheus' InstrumentHandlerFunc but wraps // the go-restful RouteFunction instead of a HandlerFunc plus some Kubernetes endpoint specific information. 
func InstrumentRouteFunc(verb, group, version, resource, subresource, scope, component string, routeFunc restful.RouteFunction) restful.RouteFunction { return restful.RouteFunction(func(request *restful.Request, response *restful.Response) { now := time.Now() delegate := &ResponseWriterDelegator{ResponseWriter: response.ResponseWriter} _, cn := response.ResponseWriter.(http.CloseNotifier) _, fl := response.ResponseWriter.(http.Flusher) _, hj := response.ResponseWriter.(http.Hijacker) var rw http.ResponseWriter if cn && fl && hj { rw = &fancyResponseWriterDelegator{delegate} } else { rw = delegate } response.ResponseWriter = rw routeFunc(request, response) MonitorRequest(request.Request, verb, group, version, resource, subresource, scope, component, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) }) } // InstrumentHandlerFunc works like Prometheus' InstrumentHandlerFunc but adds some Kubernetes endpoint specific information. func InstrumentHandlerFunc(verb, group, version, resource, subresource, scope, component string, handler http.HandlerFunc) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { now := time.Now() delegate := &ResponseWriterDelegator{ResponseWriter: w} _, cn := w.(http.CloseNotifier) _, fl := w.(http.Flusher) _, hj := w.(http.Hijacker) if cn && fl && hj { w = &fancyResponseWriterDelegator{delegate} } else { w = delegate } handler(w, req) MonitorRequest(req, verb, group, version, resource, subresource, scope, component, delegate.Header().Get("Content-Type"), delegate.Status(), delegate.ContentLength(), time.Since(now)) } } // CleanScope returns the scope of the request. func CleanScope(requestInfo *request.RequestInfo) string { if requestInfo.Namespace != "" { return "namespace" } if requestInfo.Name != "" { return "resource" } if requestInfo.IsResourceRequest { return "cluster" } // this is the empty scope return "" } func canonicalVerb(verb string, scope string) string { switch verb { case "GET", "HEAD": if scope != "resource" { return "LIST" } return "GET" default: return verb } } func cleanVerb(verb string, request *http.Request) string { reportedVerb := verb if verb == "LIST" { // see apimachinery/pkg/runtime/conversion.go Convert_Slice_string_To_bool if values := request.URL.Query()["watch"]; len(values) > 0 { if value := strings.ToLower(values[0]); value != "0" && value != "false" { reportedVerb = "WATCH" } } } // normalize the legacy WATCHLIST to WATCH to ensure users aren't surprised by metrics if verb == "WATCHLIST" { reportedVerb = "WATCH" } if verb == "PATCH" && request.Header.Get("Content-Type") == string(types.ApplyPatchType) && utilfeature.DefaultFeatureGate.Enabled(features.ServerSideApply) { reportedVerb = "APPLY" } return reportedVerb } func cleanDryRun(u *url.URL) string { // avoid allocating when we don't see dryRun in the query if !strings.Contains(u.RawQuery, "dryRun") { return "" } dryRun := u.Query()["dryRun"] if errs := validation.ValidateDryRun(nil, dryRun); len(errs) > 0 { return "invalid" } // Since dryRun could be valid with any arbitrarily long length // we have to dedup and sort the elements before joining them together // TODO: this is a fairly large allocation for what it does, consider // a sort and dedup in a single pass return strings.Join(utilsets.NewString(dryRun...).List(), ",") } func cleanUserAgent(ua string) string { // We collapse all "web browser"-type user agents into one "browser" to reduce metric cardinality. 
if strings.HasPrefix(ua, "Mozilla/") { return "Browser" } // If an old "kubectl.exe" has passed us its full path, we discard the path portion. if kubectlExeRegexp.MatchString(ua) { // avoid an allocation ua = kubectlExeRegexp.ReplaceAllString(ua, "$1") } return ua } // ResponseWriterDelegator interface wraps http.ResponseWriter to additionally record content-length, status-code, etc. type ResponseWriterDelegator struct { http.ResponseWriter status int written int64 wroteHeader bool } func (r *ResponseWriterDelegator) WriteHeader(code int) { r.status = code r.wroteHeader = true r.ResponseWriter.WriteHeader(code) } func (r *ResponseWriterDelegator) Write(b []byte) (int, error) { if !r.wroteHeader { r.WriteHeader(http.StatusOK) } n, err := r.ResponseWriter.Write(b) r.written += int64(n) return n, err } func (r *ResponseWriterDelegator) Status() int { return r.status } func (r *ResponseWriterDelegator) ContentLength() int { return int(r.written) } type fancyResponseWriterDelegator struct { *ResponseWriterDelegator } func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { return f.ResponseWriter.(http.CloseNotifier).CloseNotify() } func (f *fancyResponseWriterDelegator) Flush() { f.ResponseWriter.(http.Flusher).Flush() } func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { return f.ResponseWriter.(http.Hijacker).Hijack() } // Small optimization over Itoa func codeToString(s int) string { switch s { case 100: return "100" case 101: return "101" case 200: return "200" case 201: return "201" case 202: return "202" case 203: return "203" case 204: return "204" case 205: return "205" case 206: return "206" case 300: return "300" case 301: return "301" case 302: return "302" case 304: return "304" case 305: return "305" case 307: return "307" case 400: return "400" case 401: return "401" case 402: return "402" case 403: return "403" case 404: return "404" case 405: return "405" case 406: return "406" case 407: return "407" case 408: return "408" case 409: return "409" case 410: return "410" case 411: return "411" case 412: return "412" case 413: return "413" case 414: return "414" case 415: return "415" case 416: return "416" case 417: return "417" case 418: return "418" case 500: return "500" case 501: return "501" case 502: return "502" case 503: return "503" case 504: return "504" case 505: return "505" case 428: return "428" case 429: return "429" case 431: return "431" case 511: return "511" default: return strconv.Itoa(s) } }
staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.9696813821792603, 0.017534926533699036, 0.00016422678891103715, 0.0001749499060679227, 0.127241849899292 ]
{ "id": 3, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\tauthenticatedUserCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go", "type": "add", "edit_start_line_idx": 34 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package storage import ( "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/registry/generic" genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" "k8s.io/apiserver/pkg/registry/rest" "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/printers" printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" printerstorage "k8s.io/kubernetes/pkg/printers/storage" "k8s.io/kubernetes/pkg/registry/apps/daemonset" ) // REST implements a RESTStorage for DaemonSets type REST struct { *genericregistry.Store categories []string } // NewREST returns a RESTStorage object that will work against DaemonSets. func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) { store := &genericregistry.Store{ NewFunc: func() runtime.Object { return &apps.DaemonSet{} }, NewListFunc: func() runtime.Object { return &apps.DaemonSetList{} }, DefaultQualifiedResource: apps.Resource("daemonsets"), CreateStrategy: daemonset.Strategy, UpdateStrategy: daemonset.Strategy, DeleteStrategy: daemonset.Strategy, TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, } options := &generic.StoreOptions{RESTOptions: optsGetter} if err := store.CompleteWithOptions(options); err != nil { return nil, nil, err } statusStore := *store statusStore.UpdateStrategy = daemonset.StatusStrategy return &REST{store, []string{"all"}}, &StatusREST{store: &statusStore}, nil } // Implement ShortNamesProvider var _ rest.ShortNamesProvider = &REST{} // ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource. func (r *REST) ShortNames() []string { return []string{"ds"} } var _ rest.CategoriesProvider = &REST{} // Categories implements the CategoriesProvider interface. Returns a list of categories a resource is part of. func (r *REST) Categories() []string { return r.categories } // WithCategories sets categories for REST. func (r *REST) WithCategories(categories []string) *REST { r.categories = categories return r } // StatusREST implements the REST endpoint for changing the status of a daemonset type StatusREST struct { store *genericregistry.Store } // New creates a new DaemonSet object. func (r *StatusREST) New() runtime.Object { return &apps.DaemonSet{} } // Get retrieves the object from the storage. It is required to support Patch. func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { return r.store.Get(ctx, name, options) } // Update alters the status subset of an object. 
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { // We are explicitly setting forceAllowCreate to false in the call to the underlying storage because // subresources should never allow create on update. return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options) }
pkg/registry/apps/daemonset/storage/storage.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00027790505555458367, 0.00018022999574895948, 0.00016263122961390764, 0.00017081234545912594, 0.00003129492324660532 ]
{ "id": 3, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\tauthenticatedUserCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go", "type": "add", "edit_start_line_idx": 34 }
package storage // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/Azure/go-autorest/tracing" "net/http" ) // BlobContainersClient is the the Azure Storage Management API. type BlobContainersClient struct { BaseClient } // NewBlobContainersClient creates an instance of the BlobContainersClient client. func NewBlobContainersClient(subscriptionID string) BlobContainersClient { return NewBlobContainersClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewBlobContainersClientWithBaseURI creates an instance of the BlobContainersClient client. func NewBlobContainersClientWithBaseURI(baseURI string, subscriptionID string) BlobContainersClient { return BlobContainersClient{NewWithBaseURI(baseURI, subscriptionID)} } // ClearLegalHold clears legal hold tags. Clearing the same or non-existent tag results in an idempotent operation. // ClearLegalHold clears out only the specified tags in the request. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // legalHold - the LegalHold property that will be clear from a blob container. 
func (client BlobContainersClient) ClearLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ClearLegalHold") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: legalHold, Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "ClearLegalHold", err.Error()) } req, err := client.ClearLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", nil, "Failure preparing request") return } resp, err := client.ClearLegalHoldSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure sending request") return } result, err = client.ClearLegalHoldResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ClearLegalHold", resp, "Failure responding to request") } return } // ClearLegalHoldPreparer prepares the ClearLegalHold request. 
func (client BlobContainersClient) ClearLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } legalHold.HasLegalHold = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold", pathParameters), autorest.WithJSON(legalHold), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ClearLegalHoldSender sends the ClearLegalHold request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) ClearLegalHoldSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // ClearLegalHoldResponder handles the response to the ClearLegalHold request. The method always // closes the http.Response Body. func (client BlobContainersClient) ClearLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Create creates a new container under the specified account as described by request body. The container resource // includes metadata and properties for that container. It does not include a list of the blobs contained by the // container. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // blobContainer - properties of the blob container to create. 
func (client BlobContainersClient) Create(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Create") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: blobContainer, Constraints: []validation.Constraint{{Target: "blobContainer.ContainerProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy.ImmutabilityPolicyProperty", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "blobContainer.ContainerProperties.ImmutabilityPolicy.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}}, }}, }}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "Create", err.Error()) } req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", nil, "Failure preparing request") return } resp, err := client.CreateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure sending request") return } result, err = client.CreateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Create", resp, "Failure responding to request") } return } // CreatePreparer prepares the Create request. 
func (client BlobContainersClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), autorest.WithJSON(blobContainer), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateSender sends the Create request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) CreateSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // CreateResponder handles the response to the Create request. The method always // closes the http.Response Body. func (client BlobContainersClient) CreateResponder(resp *http.Response) (result BlobContainer, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // CreateOrUpdateImmutabilityPolicy creates or updates an unlocked immutability policy. ETag in If-Match is honored if // given but not required for this operation. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // parameters - the ImmutabilityPolicy Properties that will be created or updated to a blob container. // ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used // to apply the operation only if the immutability policy already exists. If omitted, this operation will // always be applied. 
func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (result ImmutabilityPolicy, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.CreateOrUpdateImmutabilityPolicy") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", err.Error()) } req, err := client.CreateOrUpdateImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, parameters, ifMatch) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", nil, "Failure preparing request") return } resp, err := client.CreateOrUpdateImmutabilityPolicySender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure sending request") return } result, err = client.CreateOrUpdateImmutabilityPolicyResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "CreateOrUpdateImmutabilityPolicy", resp, "Failure responding to request") } return } // CreateOrUpdateImmutabilityPolicyPreparer prepares the CreateOrUpdateImmutabilityPolicy request. 
func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *ImmutabilityPolicy, ifMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "immutabilityPolicyName": autorest.Encode("path", "default"), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), autorest.WithQueryParameters(queryParameters)) if parameters != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(parameters)) } if len(ifMatch) > 0 { preparer = autorest.DecoratePreparer(preparer, autorest.WithHeader("If-Match", autorest.String(ifMatch))) } return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateOrUpdateImmutabilityPolicySender sends the CreateOrUpdateImmutabilityPolicy request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicySender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // CreateOrUpdateImmutabilityPolicyResponder handles the response to the CreateOrUpdateImmutabilityPolicy request. The method always // closes the http.Response Body. func (client BlobContainersClient) CreateOrUpdateImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Delete deletes specified container under its account. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. 
func (client BlobContainersClient) Delete(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result autorest.Response, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Delete") defer func() { sc := -1 if result.Response != nil { sc = result.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "Delete", err.Error()) } req, err := client.DeletePreparer(ctx, resourceGroupName, accountName, containerName) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", nil, "Failure preparing request") return } resp, err := client.DeleteSender(req) if err != nil { result.Response = resp err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure sending request") return } result, err = client.DeleteResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Delete", resp, "Failure responding to request") } return } // DeletePreparer prepares the Delete request. func (client BlobContainersClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) DeleteSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // DeleteResponder handles the response to the Delete request. 
The method always // closes the http.Response Body. func (client BlobContainersClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // DeleteImmutabilityPolicy aborts an unlocked immutability policy. The response of delete has // immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this operation. Deleting a locked // immutability policy is not allowed, only way is to delete the container after deleting all blobs inside the // container. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used // to apply the operation only if the immutability policy already exists. If omitted, this operation will // always be applied. func (client BlobContainersClient) DeleteImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.DeleteImmutabilityPolicy") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "DeleteImmutabilityPolicy", err.Error()) } req, err := client.DeleteImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", nil, "Failure preparing request") return } resp, err := client.DeleteImmutabilityPolicySender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, 
"storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure sending request") return } result, err = client.DeleteImmutabilityPolicyResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "DeleteImmutabilityPolicy", resp, "Failure responding to request") } return } // DeleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request. func (client BlobContainersClient) DeleteImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "immutabilityPolicyName": autorest.Encode("path", "default"), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), autorest.WithQueryParameters(queryParameters), autorest.WithHeader("If-Match", autorest.String(ifMatch))) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteImmutabilityPolicySender sends the DeleteImmutabilityPolicy request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) DeleteImmutabilityPolicySender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // DeleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. The method always // closes the http.Response Body. func (client BlobContainersClient) DeleteImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ExtendImmutabilityPolicy extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only // action allowed on a Locked policy will be this action. ETag in If-Match is required for this operation. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // ifMatch - the entity state (ETag) version of the immutability policy to update. 
A value of "*" can be used // to apply the operation only if the immutability policy already exists. If omitted, this operation will // always be applied. // parameters - the ImmutabilityPolicy Properties that will be extended for a blob container. func (client BlobContainersClient) ExtendImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (result ImmutabilityPolicy, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.ExtendImmutabilityPolicy") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: parameters, Constraints: []validation.Constraint{{Target: "parameters", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty", Name: validation.Null, Rule: true, Chain: []validation.Constraint{{Target: "parameters.ImmutabilityPolicyProperty.ImmutabilityPeriodSinceCreationInDays", Name: validation.Null, Rule: true, Chain: nil}}}, }}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "ExtendImmutabilityPolicy", err.Error()) } req, err := client.ExtendImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch, parameters) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", nil, "Failure preparing request") return } resp, err := client.ExtendImmutabilityPolicySender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure sending request") return } result, err = client.ExtendImmutabilityPolicyResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "ExtendImmutabilityPolicy", resp, "Failure responding to request") } return } // ExtendImmutabilityPolicyPreparer prepares the ExtendImmutabilityPolicy request. 
func (client BlobContainersClient) ExtendImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string, parameters *ImmutabilityPolicy) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend", pathParameters), autorest.WithQueryParameters(queryParameters), autorest.WithHeader("If-Match", autorest.String(ifMatch))) if parameters != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(parameters)) } return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ExtendImmutabilityPolicySender sends the ExtendImmutabilityPolicy request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) ExtendImmutabilityPolicySender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // ExtendImmutabilityPolicyResponder handles the response to the ExtendImmutabilityPolicy request. The method always // closes the http.Response Body. func (client BlobContainersClient) ExtendImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Get gets properties of a specified container. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. 
func (client BlobContainersClient) Get(ctx context.Context, resourceGroupName string, accountName string, containerName string) (result BlobContainer, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "Get", err.Error()) } req, err := client.GetPreparer(ctx, resourceGroupName, accountName, containerName) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. func (client BlobContainersClient) GetPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) GetSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // GetResponder handles the response to the Get request. 
The method always // closes the http.Response Body. func (client BlobContainersClient) GetResponder(resp *http.Response) (result BlobContainer, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // GetImmutabilityPolicy gets the existing immutability policy along with the corresponding ETag in response headers // and body. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used // to apply the operation only if the immutability policy already exists. If omitted, this operation will // always be applied. func (client BlobContainersClient) GetImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.GetImmutabilityPolicy") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "GetImmutabilityPolicy", err.Error()) } req, err := client.GetImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", nil, "Failure preparing request") return } resp, err := client.GetImmutabilityPolicySender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure sending request") return } result, err = client.GetImmutabilityPolicyResponder(resp) if err != nil { err = 
autorest.NewErrorWithError(err, "storage.BlobContainersClient", "GetImmutabilityPolicy", resp, "Failure responding to request") } return } // GetImmutabilityPolicyPreparer prepares the GetImmutabilityPolicy request. func (client BlobContainersClient) GetImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "immutabilityPolicyName": autorest.Encode("path", "default"), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}", pathParameters), autorest.WithQueryParameters(queryParameters)) if len(ifMatch) > 0 { preparer = autorest.DecoratePreparer(preparer, autorest.WithHeader("If-Match", autorest.String(ifMatch))) } return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetImmutabilityPolicySender sends the GetImmutabilityPolicy request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) GetImmutabilityPolicySender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // GetImmutabilityPolicyResponder handles the response to the GetImmutabilityPolicy request. The method always // closes the http.Response Body. func (client BlobContainersClient) GetImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Lease the Lease Container operation establishes and manages a lock on a container for delete operations. The lock // duration can be 15 to 60 seconds, or can be infinite. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // parameters - lease Container request body. 
func (client BlobContainersClient) Lease(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (result LeaseContainerResponse, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Lease") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "Lease", err.Error()) } req, err := client.LeasePreparer(ctx, resourceGroupName, accountName, containerName, parameters) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", nil, "Failure preparing request") return } resp, err := client.LeaseSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure sending request") return } result, err = client.LeaseResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Lease", resp, "Failure responding to request") } return } // LeasePreparer prepares the Lease request. func (client BlobContainersClient) LeasePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, parameters *LeaseContainerRequest) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease", pathParameters), autorest.WithQueryParameters(queryParameters)) if parameters != nil { preparer = autorest.DecoratePreparer(preparer, autorest.WithJSON(parameters)) } return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // LeaseSender sends the Lease request. The method will close the // http.Response Body if it receives an error. 
func (client BlobContainersClient) LeaseSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // LeaseResponder handles the response to the Lease request. The method always // closes the http.Response Body. func (client BlobContainersClient) LeaseResponder(resp *http.Response) (result LeaseContainerResponse, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // List lists all containers and does not support a prefix like data plane. Also SRP today does not return continuation // token. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // skipToken - optional. Continuation token for the list operation. // maxpagesize - optional. Specified maximum number of containers that can be included in the list. // filter - optional. When specified, only container names starting with the filter will be listed. func (client BlobContainersClient) List(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result ListContainerItemsPage, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List") defer func() { sc := -1 if result.lci.Response.Response != nil { sc = result.lci.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "List", err.Error()) } result.fn = client.listNextResults req, err := client.ListPreparer(ctx, resourceGroupName, accountName, skipToken, maxpagesize, filter) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", nil, "Failure preparing request") return } resp, err := client.ListSender(req) if err != nil { result.lci.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure sending request") return } result.lci, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "List", resp, "Failure responding to request") } return } // ListPreparer prepares the List request. 
func (client BlobContainersClient) ListPreparer(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } if len(skipToken) > 0 { queryParameters["$skipToken"] = autorest.Encode("query", skipToken) } if len(maxpagesize) > 0 { queryParameters["$maxpagesize"] = autorest.Encode("query", maxpagesize) } if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) ListSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // ListResponder handles the response to the List request. The method always // closes the http.Response Body. func (client BlobContainersClient) ListResponder(resp *http.Response) (result ListContainerItems, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // listNextResults retrieves the next set of results, if any. func (client BlobContainersClient) listNextResults(ctx context.Context, lastResults ListContainerItems) (result ListContainerItems, err error) { req, err := lastResults.listContainerItemsPreparer(ctx) if err != nil { return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", nil, "Failure preparing next results request") } if req == nil { return } resp, err := client.ListSender(req) if err != nil { result.Response = autorest.Response{Response: resp} return result, autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure sending next results request") } result, err = client.ListResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "listNextResults", resp, "Failure responding to next results request") } return } // ListComplete enumerates all values, automatically crossing page boundaries as required. 
func (client BlobContainersClient) ListComplete(ctx context.Context, resourceGroupName string, accountName string, skipToken string, maxpagesize string, filter string) (result ListContainerItemsIterator, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.List") defer func() { sc := -1 if result.Response().Response.Response != nil { sc = result.page.Response().Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } result.page, err = client.List(ctx, resourceGroupName, accountName, skipToken, maxpagesize, filter) return } // LockImmutabilityPolicy sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is // ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // ifMatch - the entity state (ETag) version of the immutability policy to update. A value of "*" can be used // to apply the operation only if the immutability policy already exists. If omitted, this operation will // always be applied. func (client BlobContainersClient) LockImmutabilityPolicy(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (result ImmutabilityPolicy, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.LockImmutabilityPolicy") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "LockImmutabilityPolicy", err.Error()) } req, err := client.LockImmutabilityPolicyPreparer(ctx, resourceGroupName, accountName, containerName, ifMatch) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", nil, "Failure preparing request") return } resp, err := client.LockImmutabilityPolicySender(req) if err != nil { 
result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure sending request") return } result, err = client.LockImmutabilityPolicyResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "LockImmutabilityPolicy", resp, "Failure responding to request") } return } // LockImmutabilityPolicyPreparer prepares the LockImmutabilityPolicy request. func (client BlobContainersClient) LockImmutabilityPolicyPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, ifMatch string) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock", pathParameters), autorest.WithQueryParameters(queryParameters), autorest.WithHeader("If-Match", autorest.String(ifMatch))) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // LockImmutabilityPolicySender sends the LockImmutabilityPolicy request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) LockImmutabilityPolicySender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // LockImmutabilityPolicyResponder handles the response to the LockImmutabilityPolicy request. The method always // closes the http.Response Body. func (client BlobContainersClient) LockImmutabilityPolicyResponder(resp *http.Response) (result ImmutabilityPolicy, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // SetLegalHold sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold follows an // append pattern and does not clear out the existing tags that are not specified in the request. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // legalHold - the LegalHold property that will be set to a blob container. 
func (client BlobContainersClient) SetLegalHold(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (result LegalHold, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.SetLegalHold") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}, {TargetValue: legalHold, Constraints: []validation.Constraint{{Target: "legalHold.Tags", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "SetLegalHold", err.Error()) } req, err := client.SetLegalHoldPreparer(ctx, resourceGroupName, accountName, containerName, legalHold) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", nil, "Failure preparing request") return } resp, err := client.SetLegalHoldSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure sending request") return } result, err = client.SetLegalHoldResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "SetLegalHold", resp, "Failure responding to request") } return } // SetLegalHoldPreparer prepares the SetLegalHold request. 
func (client BlobContainersClient) SetLegalHoldPreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, legalHold LegalHold) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } legalHold.HasLegalHold = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPost(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold", pathParameters), autorest.WithJSON(legalHold), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // SetLegalHoldSender sends the SetLegalHold request. The method will close the // http.Response Body if it receives an error. func (client BlobContainersClient) SetLegalHoldSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // SetLegalHoldResponder handles the response to the SetLegalHold request. The method always // closes the http.Response Body. func (client BlobContainersClient) SetLegalHoldResponder(resp *http.Response) (result LegalHold, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Update updates container properties as specified in request body. Properties not mentioned in the request will be // unchanged. Update fails if the specified container doesn't already exist. // Parameters: // resourceGroupName - the name of the resource group within the user's subscription. The name is case // insensitive. // accountName - the name of the storage account within the specified resource group. Storage account names // must be between 3 and 24 characters in length and use numbers and lower-case letters only. // containerName - the name of the blob container within the specified storage account. Blob container names // must be between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every // dash (-) character must be immediately preceded and followed by a letter or number. // blobContainer - properties to update for the blob container. 
func (client BlobContainersClient) Update(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (result BlobContainer, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/BlobContainersClient.Update") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: resourceGroupName, Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, {TargetValue: accountName, Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: containerName, Constraints: []validation.Constraint{{Target: "containerName", Name: validation.MaxLength, Rule: 63, Chain: nil}, {Target: "containerName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, {TargetValue: client.SubscriptionID, Constraints: []validation.Constraint{{Target: "client.SubscriptionID", Name: validation.MinLength, Rule: 1, Chain: nil}}}}); err != nil { return result, validation.NewError("storage.BlobContainersClient", "Update", err.Error()) } req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, containerName, blobContainer) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", nil, "Failure preparing request") return } resp, err := client.UpdateSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure sending request") return } result, err = client.UpdateResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "storage.BlobContainersClient", "Update", resp, "Failure responding to request") } return } // UpdatePreparer prepares the Update request. func (client BlobContainersClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, containerName string, blobContainer BlobContainer) (*http.Request, error) { pathParameters := map[string]interface{}{ "accountName": autorest.Encode("path", accountName), "containerName": autorest.Encode("path", containerName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-04-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPatch(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}", pathParameters), autorest.WithJSON(blobContainer), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. 
func (client BlobContainersClient) UpdateSender(req *http.Request) (*http.Response, error) { sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) return autorest.SendWithSender(client, req, sd...) } // UpdateResponder handles the response to the Update request. The method always // closes the http.Response Body. func (client BlobContainersClient) UpdateResponder(resp *http.Response) (result BlobContainer, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-04-01/storage/blobcontainers.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.000358894991222769, 0.00017417847993783653, 0.00016096136823762208, 0.00017312778800260276, 0.00001733901081024669 ]
{ "id": 3, "code_window": [ "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", "\t\"k8s.io/klog\"\n", ")\n", "\n", "var (\n", "\tauthenticatedUserCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go", "type": "add", "edit_start_line_idx": 34 }
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains support packages for oauth2 package.
package internal
vendor/golang.org/x/oauth2/internal/doc.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017947491141967475, 0.00017947491141967475, 0.00017947491141967475, 0.00017947491141967475, 0 ]
{ "id": 4, "code_window": [ "\tAPIServerComponent string = \"apiserver\"\n", ")\n", "\n", "var (\n", "\t// TODO(a-robinson): Add unit tests for the handling of these metrics once\n", "\t// the upstream library supports it.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go", "type": "add", "edit_start_line_idx": 54 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metrics import ( "sync" "time" compbasemetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) var ( etcdRequestLatency = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "etcd_request_duration_seconds", Help: "Etcd request latency in seconds for each operation and object type.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"operation", "type"}, ) objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "etcd_object_counts", Help: "Number of stored objects at the time of last check split by kind.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"resource"}, ) deprecatedEtcdRequestLatenciesSummary = compbasemetrics.NewSummaryVec( &compbasemetrics.SummaryOpts{ Name: "etcd_request_latencies_summary", Help: "(Deprecated) Etcd request latency summary in microseconds for each operation and object type.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"operation", "type"}, ) ) var registerMetrics sync.Once // Register all metrics. func Register() { // Register the metrics. registerMetrics.Do(func() { legacyregistry.MustRegister(etcdRequestLatency) legacyregistry.MustRegister(objectCounts) // TODO(danielqsj): Remove the following metrics, they are deprecated legacyregistry.MustRegister(deprecatedEtcdRequestLatenciesSummary) }) } // UpdateObjectCount sets the etcd_object_counts metric. func UpdateObjectCount(resourcePrefix string, count int64) { objectCounts.WithLabelValues(resourcePrefix).Set(float64(count)) } // RecordEtcdRequestLatency sets the etcd_request_duration_seconds metrics. func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime)) deprecatedEtcdRequestLatenciesSummary.WithLabelValues(verb, resource).Observe(sinceInMicroseconds(startTime)) } // Reset resets the etcd_request_duration_seconds metric. func Reset() { etcdRequestLatency.Reset() deprecatedEtcdRequestLatenciesSummary.Reset() } // sinceInMicroseconds gets the time since the specified start in microseconds. func sinceInMicroseconds(start time.Time) float64 { return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) } // sinceInSeconds gets the time since the specified start in seconds. func sinceInSeconds(start time.Time) float64 { return time.Since(start).Seconds() }
staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.8766420483589172, 0.08820336312055588, 0.0001693123922450468, 0.00018975924467667937, 0.26281461119651794 ]
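Editor's note (illustrative sketch, not part of the dataset record above): the comment the hunk inserts documents a convention — metrics built on k8s.io/component-base declare an explicit StabilityLevel, ALPHA unless the component owner deliberately promotes them. A minimal sketch of that pattern, using only APIs already shown in the quoted metrics.go; the metric name, help text and label are invented for the example:

package metrics

import (
	compbasemetrics "k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// exampleRequests is a made-up ALPHA metric; promoting it to a higher
// stability class would be an explicit, owner-acknowledged change.
var exampleRequests = compbasemetrics.NewCounterVec(
	&compbasemetrics.CounterOpts{
		Name:           "example_requests_total",
		Help:           "Counter of example requests, broken out by verb.",
		StabilityLevel: compbasemetrics.ALPHA,
	},
	[]string{"verb"},
)

func init() {
	legacyregistry.MustRegister(exampleRequests)
}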
{ "id": 4, "code_window": [ "\tAPIServerComponent string = \"apiserver\"\n", ")\n", "\n", "var (\n", "\t// TODO(a-robinson): Add unit tests for the handling of these metrics once\n", "\t// the upstream library supports it.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go", "type": "add", "edit_start_line_idx": 54 }
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package servicecatalog

import "github.com/onsi/ginkgo"

// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-service-catalog] "+text, body)
}
test/e2e/servicecatalog/framework.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017823153757490218, 0.00017465541895944625, 0.00016797211719676852, 0.00017776258755475283, 0.000004729680767923128 ]
{ "id": 4, "code_window": [ "\tAPIServerComponent string = \"apiserver\"\n", ")\n", "\n", "var (\n", "\t// TODO(a-robinson): Add unit tests for the handling of these metrics once\n", "\t// the upstream library supports it.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go", "type": "add", "edit_start_line_idx": 54 }
{% panel style="success", title="Providing Feedback" %} **Provide feedback at the [survey](https://www.surveymonkey.com/r/JH35X82)** {% endpanel %} {% panel style="info", title="TL;DR" %} - A Kubernetes API has 2 parts - a Resource Type and a Controller - Resources are objects declared as json or yaml and written to a cluster - Controllers asynchronously actuate Resources after they are stored {% endpanel %} # Kubernetes Resources and Controllers Overview This section provides background on the Kubernetes Resource model. This information is also available at the [kubernetes.io](https://kubernetes.io/docs/home/) docs site. For more information on Kubernetes Resources see: [kubernetes.io Concepts](https://kubernetes.io/docs/concepts/). ## Resources Instances of Kubernetes objects (e.g. Deployment, Services, Namespaces, etc) are called **Resources**. Resources which run containers are referred to as **Workloads**. Examples of Workloads: - [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) - [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) - [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) - [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) - [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) **Users work with Resource APIs by declaring them in files which are then Applied to a Kubernetes cluster. These declarative files are called Resource Config.** Resource Config is *Applied* (declarative Create/Update/Delete) to a Kubernetes cluster using tools such as Kubectl, and then actuated by a *Controller*. Resources are uniquely identified: - **apiVersion** (API Type Group and Version) - **kind** (API Type Name) - **metadata.namespace** (Instance namespace) - **metadata.name** (Instance name) {% panel style="warning", title="Default Namespace" %} If namespace is omitted from the Resource Config, the *default* namespace is used. Users should almost always explicitly specify the namespace for their Application using a `kustomization.yaml`. {% endpanel %} {% method %} ### Resources Structure Resources have the following components. **TypeMeta:** Resource Type **apiVersion** and **kind**. **ObjectMeta:** Resource **name** and **namespace** + other metadata (labels, annotations, etc). **Spec:** the desired state of the Resource - intended state the user provides to the cluster. **Status:** the observed state of the object - recorded state the cluster provides to the user. Resource Config written by the user omits the Status field. **Example Deployment Resource Config** {% sample lang="yaml" %} ```yaml apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment labels: app: nginx spec: replicas: 3 selector: matchLabels: app: nginx template: metadata: labels: app: nginx spec: containers: - name: nginx image: nginx:1.15.4 ``` {% endmethod %} {% panel style="info", title="Spec and Status" %} Resources such as ConfigMaps and Secrets do not have a Status, and as a result their Spec is implicit (i.e. they don't have a spec field). {% endpanel %} ## Controllers Controllers actuate Kubernetes APIs. They observe the state of the system and look for changes either to desired state of Resources (create, update, delete) or the system (Pod or Node dies). Controllers then make changes to the cluster to fulfill the intent specified by the user (e.g. in Resource Config) or automation (e.g. changes from Autoscalers). 
**Example:** After a user creates a Deployment, the Deployment Controller will see that the Deployment exists and verify that the corresponding ReplicaSet it expects to find exists. The Controller will see that the ReplicaSet does not exist and will create one. {% panel style="warning", title="Asynchronous Actuation" %} Because Controllers run asynchronously, issues such as a bad Container Image or unschedulable Pods will not be present in the CRUD response. Tooling must facilitate processes for watching the state of the system until changes are completely actuated by Controllers. Once the changes have been fully actuated such that the desired state matches the observed state, the Resource is considered *Settled*. {% endpanel %} ### Controller Structure **Reconcile** Controllers actuate Resources by reading the Resource they are Reconciling + related Resources, such as those that they create and delete. **Controllers *do not* Reconcile events, rather they Reconcile the expected cluster state to the observed cluster state at the time Reconcile is run.** 1. Deployment Controller creates/deletes ReplicaSets 1. ReplicaSet Controller creates/delete Pods 1. Scheduler (Controller) writes Nodes to Pods 1. Node (Controller) runs Containers specifid in Pods on the Node **Watch** Controllers actuate Resources *after* they are written by Watching Resource Types, and then triggering Reconciles from Events. After a Resource is created/updated/deleted, Controllers Watching the Resource Type will receive a notification that the Resource has been changed, and they will read the state of the system to see what has changed (instead of relying on the Event for this information). - Deployment Controller watches Deployments + ReplicaSets (+ Pods) - ReplicaSet Controller watches ReplicaSets + Pods - Scheduler (Controller) watches Pods - Node (Controller) watches Pods (+ Secrets + ConfigMaps) {% panel style="info", title="Level vs Edge Based Reconciliation" %} Because Controllers don't respond to individual Events, but instead Reconcile the state of the system at the time that Reconcile is run, **changes from several different events may be observed and Reconciled together.** This is referred to as a *Level Based* system, whereas a system that responds to each event individually would be referred to as an *Edge Based* system. {% endpanel %} ## Overview of Kubernetes Resource APIs ### Pods Containers are run in [*Pods*](https://kubernetes.io/docs/concepts/workloads/pods/pod-overview/) which are scheduled to run on *Nodes* (i.e. worker machines) in a cluster. Pods run a *single replica* of an Application and provide: - Compute Resources (cpu, memory, disk) - Environment Variables - Readiness and Health Checking - Network (IP address shared by containers in the Pod) - Mounting Shared Configuration and Secrets - Mounting Storage Volumes - Initialization {% panel style="warning", title="Multi Container Pods" %} Multiple replicas of an Application should be created using a Workload API to manage creation and deletion of Pod replicas using a PodTemplate. In some cases a Pod may contain multiple Containers forming a single instance of an Application. These containers may coordinate with one another through shared network (IP) and storage. {% endpanel %} ### Workloads Pods are typically managed by higher level abstractions that handle concerns such as replication, identity, persistent storage, custom scheduling, rolling updates, etc. 
The most common out-of-the-box Workload APIs (manage Pods) are: - [Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) (Stateless Applications) - replication + rollouts - [StatefulSets](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) (Stateful Applications) - replication + rollouts + persistent storage + identity - [Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) (Batch Work) - run to completion - [CronJobs](https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/) (Scheduled Batch Work) - scheduled run to completion - [DaemonSets](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) (Per-Machine) - per-Node scheduling {% panel style="success", title="API Abstraction Layers" %} High-level Workload APIs may manage lower-level Workload APIs instead of directly managing Pods (e.g. Deployments manage ReplicaSets). {% endpanel %} ### Service Discovery and Load Balancing Service discovery and Load Balancing may be managed by a *Service* object. Services provide a single virtual IP address and dns name load balanced to a collection of Pods matching Labels. {% panel style="info", title="Internal vs External Services" %} - [Services Resources](https://kubernetes.io/docs/concepts/services-networking/service/) (L4) may expose Pods internally within a cluster or externally through an HA proxy. - [Ingress Resources](https://kubernetes.io/docs/concepts/services-networking/ingress/) (L7) may expose URI endpoints and route them to Services. {% endpanel %} ### Configuration and Secrets Shared Configuration and Secret data may be provided by ConfigMaps and Secrets. This allows Environment Variables, Command Line Arguments and Files to be loosely injected into the Pods and Containers that consume them. {% panel style="info", title="ConfigMaps vs Secrets" %} - [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) are for providing non-sensitive data to Pods. - [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/) are for providing sensitive data to Pods. {% endpanel %}
staging/src/k8s.io/kubectl/docs/book/pages/kubectl_book/resources_and_controllers.md
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017487118020653725, 0.0001672559737926349, 0.0001628973986953497, 0.00016657142259646207, 0.000002987989091707277 ]
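Editor's note (illustrative sketch, not part of the record above): the quoted docs show the nginx Deployment declaratively as YAML; the same object expressed with the typed apps/v1 and core/v1 Go APIs used elsewhere in this file looks as follows (construction only, no API call; the helper name is invented):

package main

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nginxDeployment mirrors the YAML example from the quoted page:
// 3 replicas of nginx:1.15.4 selected by the app=nginx label.
func nginxDeployment() *appsv1.Deployment {
	replicas := int32(3)
	labels := map[string]string{"app": "nginx"}
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "nginx-deployment", Labels: labels},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:  "nginx",
						Image: "nginx:1.15.4",
					}},
				},
			},
		},
	}
}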
{ "id": 4, "code_window": [ "\tAPIServerComponent string = \"apiserver\"\n", ")\n", "\n", "var (\n", "\t// TODO(a-robinson): Add unit tests for the handling of these metrics once\n", "\t// the upstream library supports it.\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go", "type": "add", "edit_start_line_idx": 54 }
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
cluster/addons/metrics-server/auth-delegator.yaml
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00019289969350211322, 0.00017847763956524432, 0.00016405557107646018, 0.00017847763956524432, 0.00001442206121282652 ]
{ "id": 5, "code_window": [ "\t\"k8s.io/klog\"\n", "\tutiltrace \"k8s.io/utils/trace\"\n", ")\n", "\n", "var (\n", "\tinitCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go", "type": "add", "edit_start_line_idx": 47 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package filters import ( "errors" "net/http" "strings" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apiserver/pkg/authentication/authenticator" "k8s.io/apiserver/pkg/endpoints/handlers/responsewriters" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog" ) var ( authenticatedUserCounter = metrics.NewCounterVec( &metrics.CounterOpts{ Name: "authenticated_user_requests", Help: "Counter of authenticated requests broken out by username.", StabilityLevel: metrics.ALPHA, }, []string{"username"}, ) ) func init() { legacyregistry.MustRegister(authenticatedUserCounter) } // WithAuthentication creates an http handler that tries to authenticate the given request as a user, and then // stores any such user found onto the provided context for the request. If authentication fails or returns an error // the failed handler is used. On success, "Authorization" header is removed from the request and handler // is invoked to serve the request. func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences) http.Handler { if auth == nil { klog.Warningf("Authentication is disabled") return handler } return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if len(apiAuds) > 0 { req = req.WithContext(authenticator.WithAudiences(req.Context(), apiAuds)) } resp, ok, err := auth.AuthenticateRequest(req) if err != nil || !ok { if err != nil { klog.Errorf("Unable to authenticate the request due to an error: %v", err) } failed.ServeHTTP(w, req) return } // TODO(mikedanese): verify the response audience matches one of apiAuds if // non-empty // authorization header is not required anymore in case of a successful authentication. req.Header.Del("Authorization") req = req.WithContext(genericapirequest.WithUser(req.Context(), resp.User)) authenticatedUserCounter.WithLabelValues(compressUsername(resp.User.GetName())).Inc() handler.ServeHTTP(w, req) }) } func Unauthorized(s runtime.NegotiatedSerializer, supportsBasicAuth bool) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { if supportsBasicAuth { w.Header().Set("WWW-Authenticate", `Basic realm="kubernetes-master"`) } ctx := req.Context() requestInfo, found := genericapirequest.RequestInfoFrom(ctx) if !found { responsewriters.InternalError(w, req, errors.New("no RequestInfo found in the context")) return } gv := schema.GroupVersion{Group: requestInfo.APIGroup, Version: requestInfo.APIVersion} responsewriters.ErrorNegotiated(apierrors.NewUnauthorized("Unauthorized"), s, gv, w, req) }) } // compressUsername maps all possible usernames onto a small set of categories // of usernames. This is done both to limit the cardinality of the // authorized_user_requests metric, and to avoid pushing actual usernames in the // metric. 
func compressUsername(username string) string { switch { // Known internal identities. case username == "admin" || username == "client" || username == "kube_proxy" || username == "kubelet" || username == "system:serviceaccount:kube-system:default": return username // Probably an email address. case strings.Contains(username, "@"): return "email_id" // Anything else (custom service accounts, custom external identities, etc.) default: return "other" } }
staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.12083201855421066, 0.00966632179915905, 0.0001645655429456383, 0.00017474866763222963, 0.03209906443953514 ]
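Editor's note (illustrative sketch, not part of the record above): the mapping performed by the unexported compressUsername helper quoted in the file, written as the kind of table-driven test that would sit next to it in package filters; the input strings are examples only:

package filters

import "testing"

func TestCompressUsername(t *testing.T) {
	cases := map[string]string{
		"kubelet":                      "kubelet",  // known internal identity is passed through
		"jane@example.com":             "email_id", // anything containing '@' is bucketed as an email
		"system:serviceaccount:dev:ci": "other",    // custom identities collapse into "other"
	}
	for in, want := range cases {
		if got := compressUsername(in); got != want {
			t.Errorf("compressUsername(%q) = %q, want %q", in, got, want)
		}
	}
}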
{ "id": 5, "code_window": [ "\t\"k8s.io/klog\"\n", "\tutiltrace \"k8s.io/utils/trace\"\n", ")\n", "\n", "var (\n", "\tinitCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go", "type": "add", "edit_start_line_idx": 47 }
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
	conversion "k8s.io/apimachinery/pkg/conversion"
	clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
)

func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
	return nil
}
staging/src/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0002693291462492198, 0.0002075040974887088, 0.0001742726017255336, 0.00017891055904328823, 0.00004375788921606727 ]
{ "id": 5, "code_window": [ "\t\"k8s.io/klog\"\n", "\tutiltrace \"k8s.io/utils/trace\"\n", ")\n", "\n", "var (\n", "\tinitCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go", "type": "add", "edit_start_line_idx": 47 }
/* Copyright The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by lister-gen. DO NOT EDIT. package v1beta1 import ( v1beta1 "k8s.io/api/authentication/v1beta1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) // TokenReviewLister helps list TokenReviews. type TokenReviewLister interface { // List lists all TokenReviews in the indexer. List(selector labels.Selector) (ret []*v1beta1.TokenReview, err error) // Get retrieves the TokenReview from the index for a given name. Get(name string) (*v1beta1.TokenReview, error) TokenReviewListerExpansion } // tokenReviewLister implements the TokenReviewLister interface. type tokenReviewLister struct { indexer cache.Indexer } // NewTokenReviewLister returns a new TokenReviewLister. func NewTokenReviewLister(indexer cache.Indexer) TokenReviewLister { return &tokenReviewLister{indexer: indexer} } // List lists all TokenReviews in the indexer. func (s *tokenReviewLister) List(selector labels.Selector) (ret []*v1beta1.TokenReview, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { ret = append(ret, m.(*v1beta1.TokenReview)) }) return ret, err } // Get retrieves the TokenReview from the index for a given name. func (s *tokenReviewLister) Get(name string) (*v1beta1.TokenReview, error) { obj, exists, err := s.indexer.GetByKey(name) if err != nil { return nil, err } if !exists { return nil, errors.NewNotFound(v1beta1.Resource("tokenreview"), name) } return obj.(*v1beta1.TokenReview), nil }
staging/src/k8s.io/client-go/listers/authentication/v1beta1/tokenreview.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017874430341180414, 0.00017080060206353664, 0.00016328213678207248, 0.00017045278218574822, 0.000005770264579041395 ]
{ "id": 5, "code_window": [ "\t\"k8s.io/klog\"\n", "\tutiltrace \"k8s.io/utils/trace\"\n", ")\n", "\n", "var (\n", "\tinitCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go", "type": "add", "edit_start_line_idx": 47 }
// Copyright 2013 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package model import ( "encoding/json" "fmt" "regexp" "strings" "unicode/utf8" ) const ( // AlertNameLabel is the name of the label containing the an alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in // exported metrics if a label of the same name is added by the server. ExportedLabelPrefix = "exported_" // MetricNameLabel is the label name indicating the metric name of a // timeseries. MetricNameLabel = "__name__" // SchemeLabel is the name of the label that holds the scheme on which to // scrape a target. SchemeLabel = "__scheme__" // AddressLabel is the name of the label that holds the address of // a scrape target. AddressLabel = "__address__" // MetricsPathLabel is the name of the label that holds the path on which to // scrape a target. MetricsPathLabel = "__metrics_path__" // ReservedLabelPrefix is a prefix which is not legal in user-supplied // label names. ReservedLabelPrefix = "__" // MetaLabelPrefix is a prefix for labels that provide meta information. // Labels with this prefix are used for intermediate label processing and // will not be attached to time series. MetaLabelPrefix = "__meta_" // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. // Labels with this prefix are used for intermediate label processing and // will not be attached to time series. This is reserved for use in // Prometheus configuration files by users. TmpLabelPrefix = "__tmp_" // ParamLabelPrefix is a prefix for labels that provide URL parameters // used to scrape a target. ParamLabelPrefix = "__param_" // JobLabel is the label name indicating the job from which a timeseries // was scraped. JobLabel = "job" // InstanceLabel is the label name used for the instance label. InstanceLabel = "instance" // BucketLabel is used for the label that defines the upper bound of a // bucket of a histogram ("le" -> "less or equal"). BucketLabel = "le" // QuantileLabel is used for the label that defines the quantile in a // summary. QuantileLabel = "quantile" ) // LabelNameRE is a regular expression matching valid label names. Note that the // IsValid method of LabelName performs the same check but faster than a match // with this regular expression. var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // A LabelName is a key for a LabelSet or Metric. It has a value associated // therewith. type LabelName string // IsValid is true iff the label name matches the pattern of LabelNameRE. This // method, however, does not use LabelNameRE for the check but a much faster // hardcoded implementation. func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } for i, b := range ln { if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { return false } } return true } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { var s string if err := unmarshal(&s); err != nil { return err } if !LabelName(s).IsValid() { return fmt.Errorf("%q is not a valid label name", s) } *ln = LabelName(s) return nil } // UnmarshalJSON implements the json.Unmarshaler interface. func (ln *LabelName) UnmarshalJSON(b []byte) error { var s string if err := json.Unmarshal(b, &s); err != nil { return err } if !LabelName(s).IsValid() { return fmt.Errorf("%q is not a valid label name", s) } *ln = LabelName(s) return nil } // LabelNames is a sortable LabelName slice. In implements sort.Interface. type LabelNames []LabelName func (l LabelNames) Len() int { return len(l) } func (l LabelNames) Less(i, j int) bool { return l[i] < l[j] } func (l LabelNames) Swap(i, j int) { l[i], l[j] = l[j], l[i] } func (l LabelNames) String() string { labelStrings := make([]string, 0, len(l)) for _, label := range l { labelStrings = append(labelStrings, string(label)) } return strings.Join(labelStrings, ", ") } // A LabelValue is an associated value for a LabelName. type LabelValue string // IsValid returns true iff the string is a valid UTF8. func (lv LabelValue) IsValid() bool { return utf8.ValidString(string(lv)) } // LabelValues is a sortable LabelValue slice. It implements sort.Interface. type LabelValues []LabelValue func (l LabelValues) Len() int { return len(l) } func (l LabelValues) Less(i, j int) bool { return string(l[i]) < string(l[j]) } func (l LabelValues) Swap(i, j int) { l[i], l[j] = l[j], l[i] } // LabelPair pairs a name with a value. type LabelPair struct { Name LabelName Value LabelValue } // LabelPairs is a sortable slice of LabelPair pointers. It implements // sort.Interface. type LabelPairs []*LabelPair func (l LabelPairs) Len() int { return len(l) } func (l LabelPairs) Less(i, j int) bool { switch { case l[i].Name > l[j].Name: return false case l[i].Name < l[j].Name: return true case l[i].Value > l[j].Value: return false case l[i].Value < l[j].Value: return true default: return false } } func (l LabelPairs) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
vendor/github.com/prometheus/common/model/labels.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017942408157978207, 0.00017098708485718817, 0.00016384701302740723, 0.00016997571219690144, 0.000004018331765109906 ]
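Editor's note (illustrative sketch, not part of the record above): how the LabelName and LabelValue validation quoted from the vendored prometheus/common package behaves in practice:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("http_requests_total").IsValid()) // true: letters, digits and '_' only
	fmt.Println(model.LabelName("0leading_digit").IsValid())      // false: must not start with a digit
	fmt.Println(model.LabelName("has-dash").IsValid())            // false: '-' is not a legal character
	fmt.Println(model.LabelValue("läbel välue").IsValid())        // true: values only need to be valid UTF-8
}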
{ "id": 6, "code_window": [ "\tcompbasemetrics \"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tetcdRequestLatency = compbasemetrics.NewHistogramVec(\n", "\t\t&compbasemetrics.HistogramOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go", "type": "add", "edit_start_line_idx": 26 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package metrics import ( "sync" "time" compbasemetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) var ( etcdRequestLatency = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Name: "etcd_request_duration_seconds", Help: "Etcd request latency in seconds for each operation and object type.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"operation", "type"}, ) objectCounts = compbasemetrics.NewGaugeVec( &compbasemetrics.GaugeOpts{ Name: "etcd_object_counts", Help: "Number of stored objects at the time of last check split by kind.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"resource"}, ) deprecatedEtcdRequestLatenciesSummary = compbasemetrics.NewSummaryVec( &compbasemetrics.SummaryOpts{ Name: "etcd_request_latencies_summary", Help: "(Deprecated) Etcd request latency summary in microseconds for each operation and object type.", StabilityLevel: compbasemetrics.ALPHA, }, []string{"operation", "type"}, ) ) var registerMetrics sync.Once // Register all metrics. func Register() { // Register the metrics. registerMetrics.Do(func() { legacyregistry.MustRegister(etcdRequestLatency) legacyregistry.MustRegister(objectCounts) // TODO(danielqsj): Remove the following metrics, they are deprecated legacyregistry.MustRegister(deprecatedEtcdRequestLatenciesSummary) }) } // UpdateObjectCount sets the etcd_object_counts metric. func UpdateObjectCount(resourcePrefix string, count int64) { objectCounts.WithLabelValues(resourcePrefix).Set(float64(count)) } // RecordEtcdRequestLatency sets the etcd_request_duration_seconds metrics. func RecordEtcdRequestLatency(verb, resource string, startTime time.Time) { etcdRequestLatency.WithLabelValues(verb, resource).Observe(sinceInSeconds(startTime)) deprecatedEtcdRequestLatenciesSummary.WithLabelValues(verb, resource).Observe(sinceInMicroseconds(startTime)) } // Reset resets the etcd_request_duration_seconds metric. func Reset() { etcdRequestLatency.Reset() deprecatedEtcdRequestLatenciesSummary.Reset() } // sinceInMicroseconds gets the time since the specified start in microseconds. func sinceInMicroseconds(start time.Time) float64 { return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds()) } // sinceInSeconds gets the time since the specified start in seconds. func sinceInSeconds(start time.Time) float64 { return time.Since(start).Seconds() }
staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.9991618394851685, 0.5389034152030945, 0.0001678541157161817, 0.6981440782546997, 0.45826467871665955 ]
{ "id": 6, "code_window": [ "\tcompbasemetrics \"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tetcdRequestLatency = compbasemetrics.NewHistogramVec(\n", "\t\t&compbasemetrics.HistogramOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go", "type": "add", "edit_start_line_idx": 26 }
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package deployment import ( "fmt" "reflect" "time" "k8s.io/klog" apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/controller/deployment/util" ) // syncRolloutStatus updates the status of a deployment during a rollout. There are // cases this helper will run that cannot be prevented from the scaling detection, // for example a resync of the deployment after it was scaled up. In those cases, // we shouldn't try to estimate any progress. func (dc *DeploymentController) syncRolloutStatus(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet, d *apps.Deployment) error { newStatus := calculateStatus(allRSs, newRS, d) // If there is no progressDeadlineSeconds set, remove any Progressing condition. if !util.HasProgressDeadline(d) { util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing) } // If there is only one replica set that is active then that means we are not running // a new rollout and this is a resync where we don't need to estimate any progress. // In such a case, we should simply not estimate any progress for this deployment. currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) isCompleteDeployment := newStatus.Replicas == newStatus.UpdatedReplicas && currentCond != nil && currentCond.Reason == util.NewRSAvailableReason // Check for progress only if there is a progress deadline set and the latest rollout // hasn't completed yet. if util.HasProgressDeadline(d) && !isCompleteDeployment { switch { case util.DeploymentComplete(d, &newStatus): // Update the deployment conditions with a message for the new replica set that // was successfully deployed. If the condition already exists, we ignore this update. msg := fmt.Sprintf("Deployment %q has successfully progressed.", d.Name) if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q has successfully progressed.", newRS.Name) } condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg) util.SetDeploymentCondition(&newStatus, *condition) case util.DeploymentProgressing(d, &newStatus): // If there is any progress made, continue by not checking if the deployment failed. This // behavior emulates the rolling updater progressDeadline check. msg := fmt.Sprintf("Deployment %q is progressing.", d.Name) if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q is progressing.", newRS.Name) } condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg) // Update the current Progressing condition or add a new one if it doesn't exist. // If a Progressing condition with status=true already exists, we should update // everything but lastTransitionTime. SetDeploymentCondition already does that but // it also is not updating conditions when the reason of the new condition is the // same as the old. 
The Progressing condition is a special case because we want to // update with the same reason and change just lastUpdateTime iff we notice any // progress. That's why we handle it here. if currentCond != nil { if currentCond.Status == v1.ConditionTrue { condition.LastTransitionTime = currentCond.LastTransitionTime } util.RemoveDeploymentCondition(&newStatus, apps.DeploymentProgressing) } util.SetDeploymentCondition(&newStatus, *condition) case util.DeploymentTimedOut(d, &newStatus): // Update the deployment with a timeout condition. If the condition already exists, // we ignore this update. msg := fmt.Sprintf("Deployment %q has timed out progressing.", d.Name) if newRS != nil { msg = fmt.Sprintf("ReplicaSet %q has timed out progressing.", newRS.Name) } condition := util.NewDeploymentCondition(apps.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg) util.SetDeploymentCondition(&newStatus, *condition) } } // Move failure conditions of all replica sets in deployment conditions. For now, // only one failure condition is returned from getReplicaFailures. if replicaFailureCond := dc.getReplicaFailures(allRSs, newRS); len(replicaFailureCond) > 0 { // There will be only one ReplicaFailure condition on the replica set. util.SetDeploymentCondition(&newStatus, replicaFailureCond[0]) } else { util.RemoveDeploymentCondition(&newStatus, apps.DeploymentReplicaFailure) } // Do not update if there is nothing new to add. if reflect.DeepEqual(d.Status, newStatus) { // Requeue the deployment if required. dc.requeueStuckDeployment(d, newStatus) return nil } newDeployment := d newDeployment.Status = newStatus _, err := dc.client.AppsV1().Deployments(newDeployment.Namespace).UpdateStatus(newDeployment) return err } // getReplicaFailures will convert replica failure conditions from replica sets // to deployment conditions. func (dc *DeploymentController) getReplicaFailures(allRSs []*apps.ReplicaSet, newRS *apps.ReplicaSet) []apps.DeploymentCondition { var conditions []apps.DeploymentCondition if newRS != nil { for _, c := range newRS.Status.Conditions { if c.Type != apps.ReplicaSetReplicaFailure { continue } conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c)) } } // Return failures for the new replica set over failures from old replica sets. if len(conditions) > 0 { return conditions } for i := range allRSs { rs := allRSs[i] if rs == nil { continue } for _, c := range rs.Status.Conditions { if c.Type != apps.ReplicaSetReplicaFailure { continue } conditions = append(conditions, util.ReplicaSetToDeploymentCondition(c)) } } return conditions } // used for unit testing var nowFn = func() time.Time { return time.Now() } // requeueStuckDeployment checks whether the provided deployment needs to be synced for a progress // check. It returns the time after the deployment will be requeued for the progress check, 0 if it // will be requeued now, or -1 if it does not need to be requeued. func (dc *DeploymentController) requeueStuckDeployment(d *apps.Deployment, newStatus apps.DeploymentStatus) time.Duration { currentCond := util.GetDeploymentCondition(d.Status, apps.DeploymentProgressing) // Can't estimate progress if there is no deadline in the spec or progressing condition in the current status. if !util.HasProgressDeadline(d) || currentCond == nil { return time.Duration(-1) } // No need to estimate progress if the rollout is complete or already timed out. 
if util.DeploymentComplete(d, &newStatus) || currentCond.Reason == util.TimedOutReason { return time.Duration(-1) } // If there is no sign of progress at this point then there is a high chance that the // deployment is stuck. We should resync this deployment at some point in the future[1] // and check whether it has timed out. We definitely need this, otherwise we depend on the // controller resync interval. See https://github.com/kubernetes/kubernetes/issues/34458. // // [1] ProgressingCondition.LastUpdatedTime + progressDeadlineSeconds - time.Now() // // For example, if a Deployment updated its Progressing condition 3 minutes ago and has a // deadline of 10 minutes, it would need to be resynced for a progress check after 7 minutes. // // lastUpdated: 00:00:00 // now: 00:03:00 // progressDeadlineSeconds: 600 (10 minutes) // // lastUpdated + progressDeadlineSeconds - now => 00:00:00 + 00:10:00 - 00:03:00 => 07:00 after := currentCond.LastUpdateTime.Time.Add(time.Duration(*d.Spec.ProgressDeadlineSeconds) * time.Second).Sub(nowFn()) // If the remaining time is less than a second, then requeue the deployment immediately. // Make it ratelimited so we stay on the safe side, eventually the Deployment should // transition either to a Complete or to a TimedOut condition. if after < time.Second { klog.V(4).Infof("Queueing up deployment %q for a progress check now", d.Name) dc.enqueueRateLimited(d) return time.Duration(0) } klog.V(4).Infof("Queueing up deployment %q for a progress check after %ds", d.Name, int(after.Seconds())) // Add a second to avoid milliseconds skew in AddAfter. // See https://github.com/kubernetes/kubernetes/issues/39785#issuecomment-279959133 for more info. dc.enqueueAfter(d, after+time.Second) return after }
pkg/controller/deployment/progress.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00019052154675591737, 0.00017181885777972639, 0.00016475126903969795, 0.00017104104335885495, 0.000005260321813693736 ]
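The requeueStuckDeployment comment in the record above walks through the lastUpdated + progressDeadlineSeconds - now arithmetic with a 3-minute / 10-minute example. The following is a minimal standalone sketch of that same calculation, using hypothetical timestamps and a made-up deadline value rather than a real Deployment object:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical values mirroring the worked example in the comment:
	// the Progressing condition was last updated 3 minutes ago and the
	// progress deadline is 600 seconds (10 minutes).
	now := time.Now()
	lastUpdate := now.Add(-3 * time.Minute)
	progressDeadlineSeconds := int32(600)

	// lastUpdated + progressDeadlineSeconds - now => time remaining until
	// the deployment should be rechecked for progress (7 minutes here).
	after := lastUpdate.Add(time.Duration(progressDeadlineSeconds) * time.Second).Sub(now)

	if after < time.Second {
		fmt.Println("requeue the deployment for a progress check now")
		return
	}
	// The controller adds one extra second to absorb millisecond skew in AddAfter.
	fmt.Printf("requeue the deployment for a progress check after %v\n", after+time.Second)
}
```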
{ "id": 6, "code_window": [ "\tcompbasemetrics \"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tetcdRequestLatency = compbasemetrics.NewHistogramVec(\n", "\t\t&compbasemetrics.HistogramOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go", "type": "add", "edit_start_line_idx": 26 }
# Implementations ## Resource Metrics API - [Heapster](https://github.com/kubernetes/heapster): an application which gathers metrics, writes them to metrics storage "sinks", and exposes the resource metrics API from in-memory storage. - [Metrics Server](https://github.com/kubernetes-incubator/metrics-server): a lighter-weight in-memory server specifically for the resource metrics API. ## Custom Metrics API ***NB: None of the below implementations are officially part of Kubernetes. They are listed here for convenience.*** - [Prometheus Adapter](https://github.com/directxman12/k8s-prometheus-adapter). An implementation of the custom metrics API that attempts to support arbitrary metrics following a set label and naming scheme. - [Microsoft Azure Adapter](https://github.com/Azure/azure-k8s-metrics-adapter). An implementation of the custom metrics API that allows you to retrieve arbitrary metrics from Azure Monitor. - [Google Stackdriver (coming soon)](https://github.com/GoogleCloudPlatform/k8s-stackdriver) - [Datadog Cluster Agent](https://github.com/DataDog/datadog-agent/blob/c4f38af1897bac294d8fed6285098b14aafa6178/docs/cluster-agent/CUSTOM_METRICS_SERVER.md). Implementation of the external metrics provider, using Datadog as a backend for the metrics. Coming soon: Implementation of the custom metrics provider to support in-cluster metrics collected by the Datadog Agents. - [Kube Metrics Adapter](https://github.com/zalando-incubator/kube-metrics-adapter). A general purpose metrics adapter for Kubernetes that can collect and serve custom and external metrics for Horizontal Pod Autoscaling. Provides the ability to scrape pods directly or from Prometheus through user defined queries. Also capable of serving external metrics from a number of sources including AWS' SQS and [ZMON monitoring](https://github.com/zalando/zmon).
staging/src/k8s.io/metrics/IMPLEMENTATIONS.md
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00016534599126316607, 0.00016238685930147767, 0.0001606281875865534, 0.00016178665100596845, 0.0000017859505305750645 ]
{ "id": 6, "code_window": [ "\tcompbasemetrics \"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tetcdRequestLatency = compbasemetrics.NewHistogramVec(\n", "\t\t&compbasemetrics.HistogramOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go", "type": "add", "edit_start_line_idx": 26 }
/* Copyright 2014 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package credentials import ( "encoding/base64" "errors" "fmt" "net/url" "regexp" "strings" "sync" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ecr" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/klog" "k8s.io/kubernetes/pkg/credentialprovider" "k8s.io/kubernetes/pkg/version" ) var ecrPattern = regexp.MustCompile(`^(\d{12})\.dkr\.ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.amazonaws\.com(\.cn)?$`) // init registers a credential provider for each registryURLTemplate and creates // an ECR token getter factory with a new cache to store token getters func init() { credentialprovider.RegisterCredentialProvider("amazon-ecr", newECRProvider(&ecrTokenGetterFactory{cache: make(map[string]tokenGetter)})) } // ecrProvider is a DockerConfigProvider that gets and refreshes tokens // from AWS to access ECR. type ecrProvider struct { cache cache.Store getterFactory tokenGetterFactory } var _ credentialprovider.DockerConfigProvider = &ecrProvider{} func newECRProvider(getterFactory tokenGetterFactory) *ecrProvider { return &ecrProvider{ cache: cache.NewExpirationStore(stringKeyFunc, &ecrExpirationPolicy{}), getterFactory: getterFactory, } } // Enabled implements DockerConfigProvider.Enabled. Enabled is true if AWS // credentials are found. 
func (p *ecrProvider) Enabled() bool { sess, err := session.NewSessionWithOptions(session.Options{ SharedConfigState: session.SharedConfigEnable, }) if err != nil { klog.Errorf("while validating AWS credentials %v", err) return false } if _, err := sess.Config.Credentials.Get(); err != nil { klog.Errorf("while getting AWS credentials %v", err) return false } return true } // Provide returns a DockerConfig with credentials from the cache if they are // found, or from ECR func (p *ecrProvider) Provide(image string) credentialprovider.DockerConfig { parsed, err := parseRepoURL(image) if err != nil { klog.V(3).Info(err) return credentialprovider.DockerConfig{} } if cfg, exists := p.getFromCache(parsed); exists { klog.V(6).Infof("Got ECR credentials from cache for %s", parsed.registry) return cfg } klog.V(3).Info("unable to get ECR credentials from cache, checking ECR API") cfg, err := p.getFromECR(parsed) if err != nil { klog.Errorf("error getting credentials from ECR for %s %v", parsed.registry, err) return credentialprovider.DockerConfig{} } klog.V(3).Infof("Got ECR credentials from ECR API for %s", parsed.registry) return cfg } // getFromCache attempts to get credentials from the cache func (p *ecrProvider) getFromCache(parsed *parsedURL) (credentialprovider.DockerConfig, bool) { cfg := credentialprovider.DockerConfig{} obj, exists, err := p.cache.GetByKey(parsed.registry) if err != nil { klog.Errorf("error getting ECR credentials from cache: %v", err) return cfg, false } if !exists { return cfg, false } entry := obj.(*cacheEntry) cfg[entry.registry] = entry.credentials return cfg, true } // getFromECR gets credentials from ECR since they are not in the cache func (p *ecrProvider) getFromECR(parsed *parsedURL) (credentialprovider.DockerConfig, error) { cfg := credentialprovider.DockerConfig{} getter, err := p.getterFactory.GetTokenGetterForRegion(parsed.region) if err != nil { return cfg, err } params := &ecr.GetAuthorizationTokenInput{RegistryIds: []*string{aws.String(parsed.registryID)}} output, err := getter.GetAuthorizationToken(params) if err != nil { return cfg, err } if output == nil { return cfg, errors.New("authorization token is nil") } if len(output.AuthorizationData) == 0 { return cfg, errors.New("authorization data from response is empty") } data := output.AuthorizationData[0] if data.AuthorizationToken == nil { return cfg, errors.New("authorization token in response is nil") } entry, err := makeCacheEntry(data, parsed.registry) if err != nil { return cfg, err } if err := p.cache.Add(entry); err != nil { return cfg, err } cfg[entry.registry] = entry.credentials return cfg, nil } type parsedURL struct { registryID string region string registry string } // parseRepoURL parses and splits the registry URL into the registry ID, // region, and registry. 
// <registryID>.dkr.ecr(-fips).<region>.amazonaws.com(.cn) func parseRepoURL(image string) (*parsedURL, error) { parsed, err := url.Parse("https://" + image) if err != nil { return nil, fmt.Errorf("error parsing image %s %v", image, err) } splitURL := ecrPattern.FindStringSubmatch(parsed.Hostname()) if len(splitURL) == 0 { return nil, fmt.Errorf("%s is not a valid ECR repository URL", parsed.Hostname()) } return &parsedURL{ registryID: splitURL[1], region: splitURL[3], registry: parsed.Hostname(), }, nil } // tokenGetter is for testing purposes type tokenGetter interface { GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) } // tokenGetterFactory is for testing purposes type tokenGetterFactory interface { GetTokenGetterForRegion(string) (tokenGetter, error) } // ecrTokenGetterFactory stores a token getter per region type ecrTokenGetterFactory struct { cache map[string]tokenGetter mutex sync.Mutex } // awsHandlerLogger is a handler that logs all AWS SDK requests // Copied from pkg/cloudprovider/providers/aws/log_handler.go func awsHandlerLogger(req *request.Request) { service := req.ClientInfo.ServiceName region := req.Config.Region name := "?" if req.Operation != nil { name = req.Operation.Name } klog.V(3).Infof("AWS request: %s:%s in %s", service, name, *region) } func newECRTokenGetter(region string) (tokenGetter, error) { sess, err := session.NewSessionWithOptions(session.Options{ Config: aws.Config{Region: aws.String(region)}, SharedConfigState: session.SharedConfigEnable, }) if err != nil { return nil, err } getter := &ecrTokenGetter{svc: ecr.New(sess)} getter.svc.Handlers.Build.PushFrontNamed(request.NamedHandler{ Name: "k8s/user-agent", Fn: request.MakeAddToUserAgentHandler("kubernetes", version.Get().String()), }) getter.svc.Handlers.Sign.PushFrontNamed(request.NamedHandler{ Name: "k8s/logger", Fn: awsHandlerLogger, }) return getter, nil } // GetTokenGetterForRegion gets the token getter for the requested region. If it // doesn't exist, it creates a new ECR token getter func (f *ecrTokenGetterFactory) GetTokenGetterForRegion(region string) (tokenGetter, error) { f.mutex.Lock() defer f.mutex.Unlock() if getter, ok := f.cache[region]; ok { return getter, nil } getter, err := newECRTokenGetter(region) if err != nil { return nil, fmt.Errorf("unable to create token getter for region %v %v", region, err) } f.cache[region] = getter return getter, nil } // The canonical implementation type ecrTokenGetter struct { svc *ecr.ECR } // GetAuthorizationToken gets the ECR authorization token using the ECR API func (p *ecrTokenGetter) GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) { return p.svc.GetAuthorizationToken(input) } type cacheEntry struct { expiresAt time.Time credentials credentialprovider.DockerConfigEntry registry string } // makeCacheEntry decodes the ECR authorization entry and re-packages it into a // cacheEntry. 
func makeCacheEntry(data *ecr.AuthorizationData, registry string) (*cacheEntry, error) { decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken)) if err != nil { return nil, fmt.Errorf("error decoding ECR authorization token: %v", err) } parts := strings.SplitN(string(decodedToken), ":", 2) if len(parts) < 2 { return nil, errors.New("error getting username and password from authorization token") } creds := credentialprovider.DockerConfigEntry{ Username: parts[0], Password: parts[1], Email: "[email protected]", // ECR doesn't care and Docker is about to obsolete it } if data.ExpiresAt == nil { return nil, errors.New("authorization data expiresAt is nil") } return &cacheEntry{ expiresAt: data.ExpiresAt.Add(-1 * wait.Jitter(30*time.Minute, 0.2)), credentials: creds, registry: registry, }, nil } // ecrExpirationPolicy implements ExpirationPolicy from client-go. type ecrExpirationPolicy struct{} // stringKeyFunc returns the cache key as a string func stringKeyFunc(obj interface{}) (string, error) { key := obj.(*cacheEntry).registry return key, nil } // IsExpired checks if the ECR credentials are expired. func (p *ecrExpirationPolicy) IsExpired(entry *cache.TimestampedEntry) bool { return time.Now().After(entry.Obj.(*cacheEntry).expiresAt) }
pkg/credentialprovider/aws/aws_credentials.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0014536557719111443, 0.0002154911489924416, 0.00016455016157124192, 0.00017094366194214672, 0.00022672436898574233 ]
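parseRepoURL in the record above derives the registry ID and region by matching the image host against ecrPattern. Here is a small self-contained sketch of that same matching; the image reference is a made-up example, not anything taken from the commit:

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
)

// Same pattern as ecrPattern in aws_credentials.go above.
var ecrPattern = regexp.MustCompile(`^(\d{12})\.dkr\.ecr(\-fips)?\.([a-zA-Z0-9][a-zA-Z0-9-_]*)\.amazonaws\.com(\.cn)?$`)

func main() {
	// Hypothetical ECR image reference.
	image := "123456789012.dkr.ecr.us-west-2.amazonaws.com/my-app:v1"

	parsed, err := url.Parse("https://" + image)
	if err != nil {
		panic(err)
	}
	parts := ecrPattern.FindStringSubmatch(parsed.Hostname())
	if len(parts) == 0 {
		fmt.Printf("%s is not a valid ECR repository URL\n", parsed.Hostname())
		return
	}
	// parts[1] is the 12-digit registry (account) ID, parts[3] is the region.
	fmt.Printf("registryID=%s region=%s registry=%s\n", parts[1], parts[3], parsed.Hostname())
}
```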
{ "id": 7, "code_window": [ "const (\n", "\tnamespace = \"apiserver\"\n", "\tsubsystem = \"storage\"\n", ")\n", "\n", "var (\n", "\ttransformerLatencies = metrics.NewHistogramVec(\n", "\t\t&metrics.HistogramOpts{\n", "\t\t\tNamespace: namespace,\n", "\t\t\tSubsystem: subsystem,\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/value/metrics.go", "type": "add", "edit_start_line_idx": 34 }
/* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package ssh import ( "bytes" "context" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "encoding/pem" "errors" "fmt" "io/ioutil" mathrand "math/rand" "net" "net/http" "net/url" "os" "strings" "sync" "time" "golang.org/x/crypto/ssh" utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog" ) var ( tunnelOpenCounter = metrics.NewCounter( &metrics.CounterOpts{ Name: "ssh_tunnel_open_count", Help: "Counter of ssh tunnel total open attempts", StabilityLevel: metrics.ALPHA, }, ) tunnelOpenFailCounter = metrics.NewCounter( &metrics.CounterOpts{ Name: "ssh_tunnel_open_fail_count", Help: "Counter of ssh tunnel failed open attempts", StabilityLevel: metrics.ALPHA, }, ) ) func init() { legacyregistry.MustRegister(tunnelOpenCounter) legacyregistry.MustRegister(tunnelOpenFailCounter) } // TODO: Unit tests for this code, we can spin up a test SSH server with instructions here: // https://godoc.org/golang.org/x/crypto/ssh#ServerConn type sshTunnel struct { Config *ssh.ClientConfig Host string SSHPort string client *ssh.Client } func makeSSHTunnel(user string, signer ssh.Signer, host string) (*sshTunnel, error) { config := ssh.ClientConfig{ User: user, Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } return &sshTunnel{ Config: &config, Host: host, SSHPort: "22", }, nil } func (s *sshTunnel) Open() error { var err error s.client, err = realTimeoutDialer.Dial("tcp", net.JoinHostPort(s.Host, s.SSHPort), s.Config) tunnelOpenCounter.Inc() if err != nil { tunnelOpenFailCounter.Inc() } return err } func (s *sshTunnel) Dial(ctx context.Context, network, address string) (net.Conn, error) { if s.client == nil { return nil, errors.New("tunnel is not opened.") } // This Dial method does not allow to pass a context unfortunately return s.client.Dial(network, address) } func (s *sshTunnel) Close() error { if s.client == nil { return errors.New("Cannot close tunnel. Tunnel was not opened.") } if err := s.client.Close(); err != nil { return err } return nil } // Interface to allow mocking of ssh.Dial, for testing SSH type sshDialer interface { Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) } // Real implementation of sshDialer type realSSHDialer struct{} var _ sshDialer = &realSSHDialer{} func (d *realSSHDialer) Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { conn, err := net.DialTimeout(network, addr, config.Timeout) if err != nil { return nil, err } conn.SetReadDeadline(time.Now().Add(30 * time.Second)) c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) if err != nil { return nil, err } conn.SetReadDeadline(time.Time{}) return ssh.NewClient(c, chans, reqs), nil } // timeoutDialer wraps an sshDialer with a timeout around Dial(). 
The golang // ssh library can hang indefinitely inside the Dial() call (see issue #23835). // Wrapping all Dial() calls with a conservative timeout provides safety against // getting stuck on that. type timeoutDialer struct { dialer sshDialer timeout time.Duration } // 150 seconds is longer than the underlying default TCP backoff delay (127 // seconds). This timeout is only intended to catch otherwise uncaught hangs. const sshDialTimeout = 150 * time.Second var realTimeoutDialer sshDialer = &timeoutDialer{&realSSHDialer{}, sshDialTimeout} func (d *timeoutDialer) Dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { config.Timeout = d.timeout return d.dialer.Dial(network, addr, config) } // RunSSHCommand returns the stdout, stderr, and exit code from running cmd on // host as specific user, along with any SSH-level error. // If user=="", it will default (like SSH) to os.Getenv("USER") func RunSSHCommand(cmd, user, host string, signer ssh.Signer) (string, string, int, error) { return runSSHCommand(realTimeoutDialer, cmd, user, host, signer, true) } // Internal implementation of runSSHCommand, for testing func runSSHCommand(dialer sshDialer, cmd, user, host string, signer ssh.Signer, retry bool) (string, string, int, error) { if user == "" { user = os.Getenv("USER") } // Setup the config, dial the server, and open a session. config := &ssh.ClientConfig{ User: user, Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, HostKeyCallback: ssh.InsecureIgnoreHostKey(), } client, err := dialer.Dial("tcp", host, config) if err != nil && retry { err = wait.Poll(5*time.Second, 20*time.Second, func() (bool, error) { fmt.Printf("error dialing %s@%s: '%v', retrying\n", user, host, err) if client, err = dialer.Dial("tcp", host, config); err != nil { return false, err } return true, nil }) } if err != nil { return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err) } defer client.Close() session, err := client.NewSession() if err != nil { return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err) } defer session.Close() // Run the command. code := 0 var bout, berr bytes.Buffer session.Stdout, session.Stderr = &bout, &berr if err = session.Run(cmd); err != nil { // Check whether the command failed to run or didn't complete. if exiterr, ok := err.(*ssh.ExitError); ok { // If we got an ExitError and the exit code is nonzero, we'll // consider the SSH itself successful (just that the command run // errored on the host). if code = exiterr.ExitStatus(); code != 0 { err = nil } } else { // Some other kind of error happened (e.g. an IOError); consider the // SSH unsuccessful. err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err) } } return bout.String(), berr.String(), code, err } func MakePrivateKeySignerFromFile(key string) (ssh.Signer, error) { // Create an actual signer. 
buffer, err := ioutil.ReadFile(key) if err != nil { return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err) } return MakePrivateKeySignerFromBytes(buffer) } func MakePrivateKeySignerFromBytes(buffer []byte) (ssh.Signer, error) { signer, err := ssh.ParsePrivateKey(buffer) if err != nil { return nil, fmt.Errorf("error parsing SSH key: '%v'", err) } return signer, nil } func ParsePublicKeyFromFile(keyFile string) (*rsa.PublicKey, error) { buffer, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("error reading SSH key %s: '%v'", keyFile, err) } keyBlock, _ := pem.Decode(buffer) if keyBlock == nil { return nil, fmt.Errorf("error parsing SSH key %s: 'invalid PEM format'", keyFile) } key, err := x509.ParsePKIXPublicKey(keyBlock.Bytes) if err != nil { return nil, fmt.Errorf("error parsing SSH key %s: '%v'", keyFile, err) } rsaKey, ok := key.(*rsa.PublicKey) if !ok { return nil, fmt.Errorf("SSH key could not be parsed as rsa public key") } return rsaKey, nil } type tunnel interface { Open() error Close() error Dial(ctx context.Context, network, address string) (net.Conn, error) } type sshTunnelEntry struct { Address string Tunnel tunnel } type sshTunnelCreator interface { newSSHTunnel(user, keyFile, host string) (tunnel, error) } type realTunnelCreator struct{} func (*realTunnelCreator) newSSHTunnel(user, keyFile, host string) (tunnel, error) { signer, err := MakePrivateKeySignerFromFile(keyFile) if err != nil { return nil, err } return makeSSHTunnel(user, signer, host) } type SSHTunnelList struct { entries []sshTunnelEntry adding map[string]bool tunnelCreator sshTunnelCreator tunnelsLock sync.Mutex user string keyfile string healthCheckURL *url.URL } func NewSSHTunnelList(user, keyfile string, healthCheckURL *url.URL, stopChan chan struct{}) *SSHTunnelList { l := &SSHTunnelList{ adding: make(map[string]bool), tunnelCreator: &realTunnelCreator{}, user: user, keyfile: keyfile, healthCheckURL: healthCheckURL, } healthCheckPoll := 1 * time.Minute go wait.Until(func() { l.tunnelsLock.Lock() defer l.tunnelsLock.Unlock() // Healthcheck each tunnel every minute numTunnels := len(l.entries) for i, entry := range l.entries { // Stagger healthchecks evenly across duration of healthCheckPoll. delay := healthCheckPoll * time.Duration(i) / time.Duration(numTunnels) l.delayedHealthCheck(entry, delay) } }, healthCheckPoll, stopChan) return l } func (l *SSHTunnelList) delayedHealthCheck(e sshTunnelEntry, delay time.Duration) { go func() { defer runtime.HandleCrash() time.Sleep(delay) if err := l.healthCheck(e); err != nil { klog.Errorf("Healthcheck failed for tunnel to %q: %v", e.Address, err) klog.Infof("Attempting once to re-establish tunnel to %q", e.Address) l.removeAndReAdd(e) } }() } func (l *SSHTunnelList) healthCheck(e sshTunnelEntry) error { // GET the healthcheck path using the provided tunnel's dial function. transport := utilnet.SetTransportDefaults(&http.Transport{ DialContext: e.Tunnel.Dial, // TODO(cjcullen): Plumb real TLS options through. TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // We don't reuse the clients, so disable the keep-alive to properly // close the connection. DisableKeepAlives: true, }) client := &http.Client{Transport: transport} resp, err := client.Get(l.healthCheckURL.String()) if err != nil { return err } resp.Body.Close() return nil } func (l *SSHTunnelList) removeAndReAdd(e sshTunnelEntry) { // Find the entry to replace. 
l.tunnelsLock.Lock() for i, entry := range l.entries { if entry.Tunnel == e.Tunnel { l.entries = append(l.entries[:i], l.entries[i+1:]...) l.adding[e.Address] = true break } } l.tunnelsLock.Unlock() if err := e.Tunnel.Close(); err != nil { klog.Infof("Failed to close removed tunnel: %v", err) } go l.createAndAddTunnel(e.Address) } func (l *SSHTunnelList) Dial(ctx context.Context, net, addr string) (net.Conn, error) { start := time.Now() id := mathrand.Int63() // So you can match begins/ends in the log. klog.Infof("[%x: %v] Dialing...", id, addr) defer func() { klog.Infof("[%x: %v] Dialed in %v.", id, addr, time.Since(start)) }() tunnel, err := l.pickTunnel(strings.Split(addr, ":")[0]) if err != nil { return nil, err } return tunnel.Dial(ctx, net, addr) } func (l *SSHTunnelList) pickTunnel(addr string) (tunnel, error) { l.tunnelsLock.Lock() defer l.tunnelsLock.Unlock() if len(l.entries) == 0 { return nil, fmt.Errorf("No SSH tunnels currently open. Were the targets able to accept an ssh-key for user %q?", l.user) } // Prefer same tunnel as kubelet for _, entry := range l.entries { if entry.Address == addr { return entry.Tunnel, nil } } klog.Warningf("SSH tunnel not found for address %q, picking random node", addr) n := mathrand.Intn(len(l.entries)) return l.entries[n].Tunnel, nil } // Update reconciles the list's entries with the specified addresses. Existing // tunnels that are not in addresses are removed from entries and closed in a // background goroutine. New tunnels specified in addresses are opened in a // background goroutine and then added to entries. func (l *SSHTunnelList) Update(addrs []string) { haveAddrsMap := make(map[string]bool) wantAddrsMap := make(map[string]bool) func() { l.tunnelsLock.Lock() defer l.tunnelsLock.Unlock() // Build a map of what we currently have. for i := range l.entries { haveAddrsMap[l.entries[i].Address] = true } // Determine any necessary additions. for i := range addrs { // Add tunnel if it is not in l.entries or l.adding if _, ok := haveAddrsMap[addrs[i]]; !ok { if _, ok := l.adding[addrs[i]]; !ok { l.adding[addrs[i]] = true addr := addrs[i] go func() { defer runtime.HandleCrash() // Actually adding tunnel to list will block until lock // is released after deletions. l.createAndAddTunnel(addr) }() } } wantAddrsMap[addrs[i]] = true } // Determine any necessary deletions. 
var newEntries []sshTunnelEntry for i := range l.entries { if _, ok := wantAddrsMap[l.entries[i].Address]; !ok { tunnelEntry := l.entries[i] klog.Infof("Removing tunnel to deleted node at %q", tunnelEntry.Address) go func() { defer runtime.HandleCrash() if err := tunnelEntry.Tunnel.Close(); err != nil { klog.Errorf("Failed to close tunnel to %q: %v", tunnelEntry.Address, err) } }() } else { newEntries = append(newEntries, l.entries[i]) } } l.entries = newEntries }() } func (l *SSHTunnelList) createAndAddTunnel(addr string) { klog.Infof("Trying to add tunnel to %q", addr) tunnel, err := l.tunnelCreator.newSSHTunnel(l.user, l.keyfile, addr) if err != nil { klog.Errorf("Failed to create tunnel for %q: %v", addr, err) return } if err := tunnel.Open(); err != nil { klog.Errorf("Failed to open tunnel to %q: %v", addr, err) l.tunnelsLock.Lock() delete(l.adding, addr) l.tunnelsLock.Unlock() return } l.tunnelsLock.Lock() l.entries = append(l.entries, sshTunnelEntry{addr, tunnel}) delete(l.adding, addr) l.tunnelsLock.Unlock() klog.Infof("Successfully added tunnel for %q", addr) } func EncodePrivateKey(private *rsa.PrivateKey) []byte { return pem.EncodeToMemory(&pem.Block{ Bytes: x509.MarshalPKCS1PrivateKey(private), Type: "RSA PRIVATE KEY", }) } func EncodePublicKey(public *rsa.PublicKey) ([]byte, error) { publicBytes, err := x509.MarshalPKIXPublicKey(public) if err != nil { return nil, err } return pem.EncodeToMemory(&pem.Block{ Bytes: publicBytes, Type: "PUBLIC KEY", }), nil } func EncodeSSHKey(public *rsa.PublicKey) ([]byte, error) { publicKey, err := ssh.NewPublicKey(public) if err != nil { return nil, err } return ssh.MarshalAuthorizedKey(publicKey), nil } func GenerateKey(bits int) (*rsa.PrivateKey, *rsa.PublicKey, error) { private, err := rsa.GenerateKey(rand.Reader, bits) if err != nil { return nil, nil, err } return private, &private.PublicKey, nil }
pkg/ssh/ssh.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.010129431262612343, 0.0003941705508623272, 0.00016372458776459098, 0.00017096486408263445, 0.0013861001934856176 ]
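The hunk attached to this record prepends a notice that the metrics in value/metrics.go default to the ALPHA stability level, and the ssh.go file above already shows the declaration pattern that notice describes: CounterOpts with an explicit StabilityLevel, registered through legacyregistry. Below is a minimal sketch of the same declare-and-register flow; the counter name and help text are hypothetical:

```go
package main

import (
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

// exampleOpenCounter is a hypothetical counter; the explicit ALPHA stability
// level follows the same pattern as the ssh tunnel counters shown above.
var exampleOpenCounter = metrics.NewCounter(
	&metrics.CounterOpts{
		Name:           "example_open_count",
		Help:           "Counter of example open attempts",
		StabilityLevel: metrics.ALPHA,
	},
)

func init() {
	// legacyregistry exposes the counter on the standard /metrics endpoint.
	legacyregistry.MustRegister(exampleOpenCounter)
}

func main() {
	exampleOpenCounter.Inc()
}
```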
{ "id": 7, "code_window": [ "const (\n", "\tnamespace = \"apiserver\"\n", "\tsubsystem = \"storage\"\n", ")\n", "\n", "var (\n", "\ttransformerLatencies = metrics.NewHistogramVec(\n", "\t\t&metrics.HistogramOpts{\n", "\t\t\tNamespace: namespace,\n", "\t\t\tSubsystem: subsystem,\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/value/metrics.go", "type": "add", "edit_start_line_idx": 34 }
// Copyright 2017 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcproxy import ( "context" "net/http" "time" "github.com/coreos/etcd/clientv3" "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" ) // HandleHealth registers health handler on '/health'. func HandleHealth(mux *http.ServeMux, c *clientv3.Client) { mux.Handle(etcdhttp.PathHealth, etcdhttp.NewHealthHandler(func() etcdhttp.Health { return checkHealth(c) })) } func checkHealth(c *clientv3.Client) etcdhttp.Health { h := etcdhttp.Health{Health: "false"} ctx, cancel := context.WithTimeout(c.Ctx(), time.Second) _, err := c.Get(ctx, "a") cancel() if err == nil || err == rpctypes.ErrPermissionDenied { h.Health = "true" } return h }
vendor/github.com/coreos/etcd/proxy/grpcproxy/health.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017885444685816765, 0.00017579041013959795, 0.00017063686391338706, 0.00017727070371620357, 0.000003251096131862141 ]
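HandleHealth in the record above only mounts the /health route on a ServeMux. A brief usage sketch, assuming an already reachable etcd endpoint and an arbitrary listen address, might look like this:

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/proxy/grpcproxy"
)

func main() {
	// Hypothetical client configuration; the endpoint depends on the deployment.
	client, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	mux := http.NewServeMux()
	// Serves "true"/"false" health based on a probe Get against the cluster.
	grpcproxy.HandleHealth(mux, client)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", mux))
}
```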
{ "id": 7, "code_window": [ "const (\n", "\tnamespace = \"apiserver\"\n", "\tsubsystem = \"storage\"\n", ")\n", "\n", "var (\n", "\ttransformerLatencies = metrics.NewHistogramVec(\n", "\t\t&metrics.HistogramOpts{\n", "\t\t\tNamespace: namespace,\n", "\t\t\tSubsystem: subsystem,\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/value/metrics.go", "type": "add", "edit_start_line_idx": 34 }
base.go export-subst
staging/src/k8s.io/client-go/pkg/version/.gitattributes
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017173292872030288, 0.00017173292872030288, 0.00017173292872030288, 0.00017173292872030288, 0 ]
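The .gitattributes record above marks base.go with export-subst, which tells `git archive` to expand $Format:...$ placeholders in that file when a source tarball is produced. The snippet below is only an illustration of that mechanism with a made-up variable name, not a quote of base.go:

```go
package version

// A hypothetical placeholder of the kind export-subst is meant for: in a
// normal checkout the string stays literal, but `git archive` rewrites
// $Format:%H$ to the commit hash, so exported tarballs still carry version
// information.
var exampleGitCommit = "$Format:%H$"
```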
{ "id": 7, "code_window": [ "const (\n", "\tnamespace = \"apiserver\"\n", "\tsubsystem = \"storage\"\n", ")\n", "\n", "var (\n", "\ttransformerLatencies = metrics.NewHistogramVec(\n", "\t\t&metrics.HistogramOpts{\n", "\t\t\tNamespace: namespace,\n", "\t\t\tSubsystem: subsystem,\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/apiserver/pkg/storage/value/metrics.go", "type": "add", "edit_start_line_idx": 34 }
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package validation import ( "fmt" "strings" genericvalidation "k8s.io/apimachinery/pkg/api/validation" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" utilvalidation "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/util/webhook" "k8s.io/kubernetes/pkg/apis/admissionregistration" "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1" ) func hasWildcard(slice []string) bool { for _, s := range slice { if s == "*" { return true } } return false } func validateResources(resources []string, fldPath *field.Path) field.ErrorList { var allErrors field.ErrorList if len(resources) == 0 { allErrors = append(allErrors, field.Required(fldPath, "")) } // */x resourcesWithWildcardSubresoures := sets.String{} // x/* subResourcesWithWildcardResource := sets.String{} // */* hasDoubleWildcard := false // * hasSingleWildcard := false // x hasResourceWithoutSubresource := false for i, resSub := range resources { if resSub == "" { allErrors = append(allErrors, field.Required(fldPath.Index(i), "")) continue } if resSub == "*/*" { hasDoubleWildcard = true } if resSub == "*" { hasSingleWildcard = true } parts := strings.SplitN(resSub, "/", 2) if len(parts) == 1 { hasResourceWithoutSubresource = resSub != "*" continue } res, sub := parts[0], parts[1] if _, ok := resourcesWithWildcardSubresoures[res]; ok { allErrors = append(allErrors, field.Invalid(fldPath.Index(i), resSub, fmt.Sprintf("if '%s/*' is present, must not specify %s", res, resSub))) } if _, ok := subResourcesWithWildcardResource[sub]; ok { allErrors = append(allErrors, field.Invalid(fldPath.Index(i), resSub, fmt.Sprintf("if '*/%s' is present, must not specify %s", sub, resSub))) } if sub == "*" { resourcesWithWildcardSubresoures[res] = struct{}{} } if res == "*" { subResourcesWithWildcardResource[sub] = struct{}{} } } if len(resources) > 1 && hasDoubleWildcard { allErrors = append(allErrors, field.Invalid(fldPath, resources, "if '*/*' is present, must not specify other resources")) } if hasSingleWildcard && hasResourceWithoutSubresource { allErrors = append(allErrors, field.Invalid(fldPath, resources, "if '*' is present, must not specify other resources without subresources")) } return allErrors } func validateResourcesNoSubResources(resources []string, fldPath *field.Path) field.ErrorList { var allErrors field.ErrorList if len(resources) == 0 { allErrors = append(allErrors, field.Required(fldPath, "")) } for i, resource := range resources { if resource == "" { allErrors = append(allErrors, field.Required(fldPath.Index(i), "")) } if strings.Contains(resource, "/") { allErrors = append(allErrors, field.Invalid(fldPath.Index(i), resource, "must not specify subresources")) } } if len(resources) > 1 && hasWildcard(resources) { allErrors = append(allErrors, field.Invalid(fldPath, resources, "if '*' is present, must not specify other resources")) 
} return allErrors } var validScopes = sets.NewString( string(admissionregistration.ClusterScope), string(admissionregistration.NamespacedScope), string(admissionregistration.AllScopes), ) func validateRule(rule *admissionregistration.Rule, fldPath *field.Path, allowSubResource bool) field.ErrorList { var allErrors field.ErrorList if len(rule.APIGroups) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("apiGroups"), "")) } if len(rule.APIGroups) > 1 && hasWildcard(rule.APIGroups) { allErrors = append(allErrors, field.Invalid(fldPath.Child("apiGroups"), rule.APIGroups, "if '*' is present, must not specify other API groups")) } // Note: group could be empty, e.g., the legacy "v1" API if len(rule.APIVersions) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("apiVersions"), "")) } if len(rule.APIVersions) > 1 && hasWildcard(rule.APIVersions) { allErrors = append(allErrors, field.Invalid(fldPath.Child("apiVersions"), rule.APIVersions, "if '*' is present, must not specify other API versions")) } for i, version := range rule.APIVersions { if version == "" { allErrors = append(allErrors, field.Required(fldPath.Child("apiVersions").Index(i), "")) } } if allowSubResource { allErrors = append(allErrors, validateResources(rule.Resources, fldPath.Child("resources"))...) } else { allErrors = append(allErrors, validateResourcesNoSubResources(rule.Resources, fldPath.Child("resources"))...) } if rule.Scope != nil && !validScopes.Has(string(*rule.Scope)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("scope"), *rule.Scope, validScopes.List())) } return allErrors } // AcceptedAdmissionReviewVersions contains the list of AdmissionReview versions the *prior* version of the API server understands. // 1.15: server understands v1beta1; accepted versions are ["v1beta1"] // 1.16: server understands v1, v1beta1; accepted versions are ["v1beta1"] // 1.17: server understands v1, v1beta1; accepted versions are ["v1","v1beta1"] var AcceptedAdmissionReviewVersions = []string{v1beta1.SchemeGroupVersion.Version} func isAcceptedAdmissionReviewVersion(v string) bool { for _, version := range AcceptedAdmissionReviewVersions { if v == version { return true } } return false } func validateAdmissionReviewVersions(versions []string, requireRecognizedAdmissionReviewVersion bool, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} // Currently only v1beta1 accepted in AdmissionReviewVersions if len(versions) < 1 { allErrors = append(allErrors, field.Required(fldPath, fmt.Sprintf("must specify one of %v", strings.Join(AcceptedAdmissionReviewVersions, ", ")))) } else { seen := map[string]bool{} hasAcceptedVersion := false for i, v := range versions { if seen[v] { allErrors = append(allErrors, field.Invalid(fldPath.Index(i), v, "duplicate version")) continue } seen[v] = true for _, errString := range utilvalidation.IsDNS1035Label(v) { allErrors = append(allErrors, field.Invalid(fldPath.Index(i), v, errString)) } if isAcceptedAdmissionReviewVersion(v) { hasAcceptedVersion = true } } if requireRecognizedAdmissionReviewVersion && !hasAcceptedVersion { allErrors = append(allErrors, field.Invalid( fldPath, versions, fmt.Sprintf("must include at least one of %v", strings.Join(AcceptedAdmissionReviewVersions, ", ")))) } } return allErrors } // ValidateValidatingWebhookConfiguration validates a webhook before creation. 
func ValidateValidatingWebhookConfiguration(e *admissionregistration.ValidatingWebhookConfiguration, requestGV schema.GroupVersion) field.ErrorList { return validateValidatingWebhookConfiguration(e, validationOptions{ requireNoSideEffects: requireNoSideEffects(requestGV), requireRecognizedAdmissionReviewVersion: true, requireUniqueWebhookNames: requireUniqueWebhookNames(requestGV), }) } func validateValidatingWebhookConfiguration(e *admissionregistration.ValidatingWebhookConfiguration, opts validationOptions) field.ErrorList { allErrors := genericvalidation.ValidateObjectMeta(&e.ObjectMeta, false, genericvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) hookNames := sets.NewString() for i, hook := range e.Webhooks { allErrors = append(allErrors, validateValidatingWebhook(&hook, opts, field.NewPath("webhooks").Index(i))...) allErrors = append(allErrors, validateAdmissionReviewVersions(hook.AdmissionReviewVersions, opts.requireRecognizedAdmissionReviewVersion, field.NewPath("webhooks").Index(i).Child("admissionReviewVersions"))...) if opts.requireUniqueWebhookNames && len(hook.Name) > 0 { if hookNames.Has(hook.Name) { allErrors = append(allErrors, field.Duplicate(field.NewPath("webhooks").Index(i).Child("name"), hook.Name)) } hookNames.Insert(hook.Name) } } return allErrors } // ValidateMutatingWebhookConfiguration validates a webhook before creation. func ValidateMutatingWebhookConfiguration(e *admissionregistration.MutatingWebhookConfiguration, requestGV schema.GroupVersion) field.ErrorList { return validateMutatingWebhookConfiguration(e, validationOptions{ requireNoSideEffects: requireNoSideEffects(requestGV), requireRecognizedAdmissionReviewVersion: true, requireUniqueWebhookNames: requireUniqueWebhookNames(requestGV), }) } type validationOptions struct { requireNoSideEffects bool requireRecognizedAdmissionReviewVersion bool requireUniqueWebhookNames bool } func validateMutatingWebhookConfiguration(e *admissionregistration.MutatingWebhookConfiguration, opts validationOptions) field.ErrorList { allErrors := genericvalidation.ValidateObjectMeta(&e.ObjectMeta, false, genericvalidation.NameIsDNSSubdomain, field.NewPath("metadata")) hookNames := sets.NewString() for i, hook := range e.Webhooks { allErrors = append(allErrors, validateMutatingWebhook(&hook, opts, field.NewPath("webhooks").Index(i))...) allErrors = append(allErrors, validateAdmissionReviewVersions(hook.AdmissionReviewVersions, opts.requireRecognizedAdmissionReviewVersion, field.NewPath("webhooks").Index(i).Child("admissionReviewVersions"))...) if opts.requireUniqueWebhookNames && len(hook.Name) > 0 { if hookNames.Has(hook.Name) { allErrors = append(allErrors, field.Duplicate(field.NewPath("webhooks").Index(i).Child("name"), hook.Name)) } hookNames.Insert(hook.Name) } } return allErrors } func validateValidatingWebhook(hook *admissionregistration.ValidatingWebhook, opts validationOptions, fldPath *field.Path) field.ErrorList { var allErrors field.ErrorList // hook.Name must be fully qualified allErrors = append(allErrors, utilvalidation.IsFullyQualifiedName(fldPath.Child("name"), hook.Name)...) for i, rule := range hook.Rules { allErrors = append(allErrors, validateRuleWithOperations(&rule, fldPath.Child("rules").Index(i))...) 
} if hook.FailurePolicy != nil && !supportedFailurePolicies.Has(string(*hook.FailurePolicy)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("failurePolicy"), *hook.FailurePolicy, supportedFailurePolicies.List())) } if hook.MatchPolicy != nil && !supportedMatchPolicies.Has(string(*hook.MatchPolicy)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("matchPolicy"), *hook.MatchPolicy, supportedMatchPolicies.List())) } allowedSideEffects := supportedSideEffectClasses if opts.requireNoSideEffects { allowedSideEffects = noSideEffectClasses } if hook.SideEffects == nil { allErrors = append(allErrors, field.Required(fldPath.Child("sideEffects"), fmt.Sprintf("must specify one of %v", strings.Join(allowedSideEffects.List(), ", ")))) } if hook.SideEffects != nil && !allowedSideEffects.Has(string(*hook.SideEffects)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("sideEffects"), *hook.SideEffects, allowedSideEffects.List())) } if hook.TimeoutSeconds != nil && (*hook.TimeoutSeconds > 30 || *hook.TimeoutSeconds < 1) { allErrors = append(allErrors, field.Invalid(fldPath.Child("timeoutSeconds"), *hook.TimeoutSeconds, "the timeout value must be between 1 and 30 seconds")) } if hook.NamespaceSelector != nil { allErrors = append(allErrors, metav1validation.ValidateLabelSelector(hook.NamespaceSelector, fldPath.Child("namespaceSelector"))...) } if hook.ObjectSelector != nil { allErrors = append(allErrors, metav1validation.ValidateLabelSelector(hook.ObjectSelector, fldPath.Child("objectSelector"))...) } cc := hook.ClientConfig switch { case (cc.URL == nil) == (cc.Service == nil): allErrors = append(allErrors, field.Required(fldPath.Child("clientConfig"), "exactly one of url or service is required")) case cc.URL != nil: allErrors = append(allErrors, webhook.ValidateWebhookURL(fldPath.Child("clientConfig").Child("url"), *cc.URL, true)...) case cc.Service != nil: allErrors = append(allErrors, webhook.ValidateWebhookService(fldPath.Child("clientConfig").Child("service"), cc.Service.Name, cc.Service.Namespace, cc.Service.Path, cc.Service.Port)...) } return allErrors } func validateMutatingWebhook(hook *admissionregistration.MutatingWebhook, opts validationOptions, fldPath *field.Path) field.ErrorList { var allErrors field.ErrorList // hook.Name must be fully qualified allErrors = append(allErrors, utilvalidation.IsFullyQualifiedName(fldPath.Child("name"), hook.Name)...) for i, rule := range hook.Rules { allErrors = append(allErrors, validateRuleWithOperations(&rule, fldPath.Child("rules").Index(i))...) 
} if hook.FailurePolicy != nil && !supportedFailurePolicies.Has(string(*hook.FailurePolicy)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("failurePolicy"), *hook.FailurePolicy, supportedFailurePolicies.List())) } if hook.MatchPolicy != nil && !supportedMatchPolicies.Has(string(*hook.MatchPolicy)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("matchPolicy"), *hook.MatchPolicy, supportedMatchPolicies.List())) } allowedSideEffects := supportedSideEffectClasses if opts.requireNoSideEffects { allowedSideEffects = noSideEffectClasses } if hook.SideEffects == nil { allErrors = append(allErrors, field.Required(fldPath.Child("sideEffects"), fmt.Sprintf("must specify one of %v", strings.Join(allowedSideEffects.List(), ", ")))) } if hook.SideEffects != nil && !allowedSideEffects.Has(string(*hook.SideEffects)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("sideEffects"), *hook.SideEffects, allowedSideEffects.List())) } if hook.TimeoutSeconds != nil && (*hook.TimeoutSeconds > 30 || *hook.TimeoutSeconds < 1) { allErrors = append(allErrors, field.Invalid(fldPath.Child("timeoutSeconds"), *hook.TimeoutSeconds, "the timeout value must be between 1 and 30 seconds")) } if hook.NamespaceSelector != nil { allErrors = append(allErrors, metav1validation.ValidateLabelSelector(hook.NamespaceSelector, fldPath.Child("namespaceSelector"))...) } if hook.ObjectSelector != nil { allErrors = append(allErrors, metav1validation.ValidateLabelSelector(hook.ObjectSelector, fldPath.Child("objectSelector"))...) } if hook.ReinvocationPolicy != nil && !supportedReinvocationPolicies.Has(string(*hook.ReinvocationPolicy)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("reinvocationPolicy"), *hook.ReinvocationPolicy, supportedReinvocationPolicies.List())) } cc := hook.ClientConfig switch { case (cc.URL == nil) == (cc.Service == nil): allErrors = append(allErrors, field.Required(fldPath.Child("clientConfig"), "exactly one of url or service is required")) case cc.URL != nil: allErrors = append(allErrors, webhook.ValidateWebhookURL(fldPath.Child("clientConfig").Child("url"), *cc.URL, true)...) case cc.Service != nil: allErrors = append(allErrors, webhook.ValidateWebhookService(fldPath.Child("clientConfig").Child("service"), cc.Service.Name, cc.Service.Namespace, cc.Service.Path, cc.Service.Port)...) 
} return allErrors } var supportedFailurePolicies = sets.NewString( string(admissionregistration.Ignore), string(admissionregistration.Fail), ) var supportedMatchPolicies = sets.NewString( string(admissionregistration.Exact), string(admissionregistration.Equivalent), ) var supportedSideEffectClasses = sets.NewString( string(admissionregistration.SideEffectClassUnknown), string(admissionregistration.SideEffectClassNone), string(admissionregistration.SideEffectClassSome), string(admissionregistration.SideEffectClassNoneOnDryRun), ) var noSideEffectClasses = sets.NewString( string(admissionregistration.SideEffectClassNone), string(admissionregistration.SideEffectClassNoneOnDryRun), ) var supportedOperations = sets.NewString( string(admissionregistration.OperationAll), string(admissionregistration.Create), string(admissionregistration.Update), string(admissionregistration.Delete), string(admissionregistration.Connect), ) var supportedReinvocationPolicies = sets.NewString( string(admissionregistration.NeverReinvocationPolicy), string(admissionregistration.IfNeededReinvocationPolicy), ) func hasWildcardOperation(operations []admissionregistration.OperationType) bool { for _, o := range operations { if o == admissionregistration.OperationAll { return true } } return false } func validateRuleWithOperations(ruleWithOperations *admissionregistration.RuleWithOperations, fldPath *field.Path) field.ErrorList { var allErrors field.ErrorList if len(ruleWithOperations.Operations) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("operations"), "")) } if len(ruleWithOperations.Operations) > 1 && hasWildcardOperation(ruleWithOperations.Operations) { allErrors = append(allErrors, field.Invalid(fldPath.Child("operations"), ruleWithOperations.Operations, "if '*' is present, must not specify other operations")) } for i, operation := range ruleWithOperations.Operations { if !supportedOperations.Has(string(operation)) { allErrors = append(allErrors, field.NotSupported(fldPath.Child("operations").Index(i), operation, supportedOperations.List())) } } allowSubResource := true allErrors = append(allErrors, validateRule(&ruleWithOperations.Rule, fldPath, allowSubResource)...) return allErrors } // mutatingHasAcceptedAdmissionReviewVersions returns true if all webhooks have at least one // admission review version this apiserver accepts. func mutatingHasAcceptedAdmissionReviewVersions(webhooks []admissionregistration.MutatingWebhook) bool { for _, hook := range webhooks { hasRecognizedVersion := false for _, version := range hook.AdmissionReviewVersions { if isAcceptedAdmissionReviewVersion(version) { hasRecognizedVersion = true break } } if !hasRecognizedVersion && len(hook.AdmissionReviewVersions) > 0 { return false } } return true } // validatingHasAcceptedAdmissionReviewVersions returns true if all webhooks have at least one // admission review version this apiserver accepts. 
func validatingHasAcceptedAdmissionReviewVersions(webhooks []admissionregistration.ValidatingWebhook) bool { for _, hook := range webhooks { hasRecognizedVersion := false for _, version := range hook.AdmissionReviewVersions { if isAcceptedAdmissionReviewVersion(version) { hasRecognizedVersion = true break } } if !hasRecognizedVersion && len(hook.AdmissionReviewVersions) > 0 { return false } } return true } // mutatingHasUniqueWebhookNames returns true if all webhooks have unique names func mutatingHasUniqueWebhookNames(webhooks []admissionregistration.MutatingWebhook) bool { names := sets.NewString() for _, hook := range webhooks { if names.Has(hook.Name) { return false } names.Insert(hook.Name) } return true } // validatingHasUniqueWebhookNames returns true if all webhooks have unique names func validatingHasUniqueWebhookNames(webhooks []admissionregistration.ValidatingWebhook) bool { names := sets.NewString() for _, hook := range webhooks { if names.Has(hook.Name) { return false } names.Insert(hook.Name) } return true } // mutatingHasNoSideEffects returns true if all webhooks have no side effects func mutatingHasNoSideEffects(webhooks []admissionregistration.MutatingWebhook) bool { for _, hook := range webhooks { if hook.SideEffects == nil || !noSideEffectClasses.Has(string(*hook.SideEffects)) { return false } } return true } // validatingHasNoSideEffects returns true if all webhooks have no side effects func validatingHasNoSideEffects(webhooks []admissionregistration.ValidatingWebhook) bool { for _, hook := range webhooks { if hook.SideEffects == nil || !noSideEffectClasses.Has(string(*hook.SideEffects)) { return false } } return true } func ValidateValidatingWebhookConfigurationUpdate(newC, oldC *admissionregistration.ValidatingWebhookConfiguration, requestGV schema.GroupVersion) field.ErrorList { return validateValidatingWebhookConfiguration(newC, validationOptions{ requireNoSideEffects: requireNoSideEffects(requestGV) && validatingHasNoSideEffects(oldC.Webhooks), requireRecognizedAdmissionReviewVersion: validatingHasAcceptedAdmissionReviewVersions(oldC.Webhooks), requireUniqueWebhookNames: requireUniqueWebhookNames(requestGV) && validatingHasUniqueWebhookNames(oldC.Webhooks), }) } func ValidateMutatingWebhookConfigurationUpdate(newC, oldC *admissionregistration.MutatingWebhookConfiguration, requestGV schema.GroupVersion) field.ErrorList { return validateMutatingWebhookConfiguration(newC, validationOptions{ requireNoSideEffects: requireNoSideEffects(requestGV) && mutatingHasNoSideEffects(oldC.Webhooks), requireRecognizedAdmissionReviewVersion: mutatingHasAcceptedAdmissionReviewVersions(oldC.Webhooks), requireUniqueWebhookNames: requireUniqueWebhookNames(requestGV) && mutatingHasUniqueWebhookNames(oldC.Webhooks), }) } // requireUniqueWebhookNames returns true for all requests except v1beta1 (for backwards compatibility) func requireUniqueWebhookNames(requestGV schema.GroupVersion) bool { return requestGV != (schema.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}) } // requireNoSideEffects returns true for all requests except v1beta1 (for backwards compatibility) func requireNoSideEffects(requestGV schema.GroupVersion) bool { return requestGV != (schema.GroupVersion{Group: admissionregistration.GroupName, Version: "v1beta1"}) }
pkg/apis/admissionregistration/validation/validation.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.009018126875162125, 0.0003673549508675933, 0.00016459597100038081, 0.0001719636347843334, 0.0012165603693574667 ]
{ "id": 8, "code_window": [ "import (\n", "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tunavailableCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n", "\t\t\tName: \"aggregator_unavailable_apiservice_count\",\n", "\t\t\tHelp: \"Counter of APIServices which are marked as unavailable broken down by APIService name and reason.\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go", "type": "add", "edit_start_line_idx": 23 }
// +build !providerless /* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gce import ( "context" "encoding/json" "net/http" "strings" "time" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "k8s.io/client-go/util/flowcontrol" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) const ( // Max QPS to allow through to the token URL. tokenURLQPS = .05 // back off to once every 20 seconds when failing // Maximum burst of requests to token URL before limiting. tokenURLBurst = 3 ) var ( getTokenCounter = metrics.NewCounter( &metrics.CounterOpts{ Name: "get_token_count", Help: "Counter of total Token() requests to the alternate token source", StabilityLevel: metrics.ALPHA, }, ) getTokenFailCounter = metrics.NewCounter( &metrics.CounterOpts{ Name: "get_token_fail_count", Help: "Counter of failed Token() requests to the alternate token source", StabilityLevel: metrics.ALPHA, }, ) ) func init() { legacyregistry.MustRegister(getTokenCounter) legacyregistry.MustRegister(getTokenFailCounter) } // AltTokenSource is the structure holding the data for the functionality needed to generates tokens type AltTokenSource struct { oauthClient *http.Client tokenURL string tokenBody string throttle flowcontrol.RateLimiter } // Token returns a token which may be used for authentication func (a *AltTokenSource) Token() (*oauth2.Token, error) { a.throttle.Accept() getTokenCounter.Inc() t, err := a.token() if err != nil { getTokenFailCounter.Inc() } return t, err } func (a *AltTokenSource) token() (*oauth2.Token, error) { req, err := http.NewRequest("POST", a.tokenURL, strings.NewReader(a.tokenBody)) if err != nil { return nil, err } res, err := a.oauthClient.Do(req) if err != nil { return nil, err } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { return nil, err } var tok struct { AccessToken string `json:"accessToken"` ExpireTime time.Time `json:"expireTime"` } if err := json.NewDecoder(res.Body).Decode(&tok); err != nil { return nil, err } return &oauth2.Token{ AccessToken: tok.AccessToken, Expiry: tok.ExpireTime, }, nil } // NewAltTokenSource constructs a new alternate token source for generating tokens. func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(context.Background(), google.ComputeTokenSource("")) a := &AltTokenSource{ oauthClient: client, tokenURL: tokenURL, tokenBody: tokenBody, throttle: flowcontrol.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst), } return oauth2.ReuseTokenSource(nil, a) }
staging/src/k8s.io/legacy-cloud-providers/gce/token_source.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.09684431552886963, 0.009316586889326572, 0.00016542909725103527, 0.0001729591895127669, 0.025701815262436867 ]
{ "id": 8, "code_window": [ "import (\n", "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tunavailableCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n", "\t\t\tName: \"aggregator_unavailable_apiservice_count\",\n", "\t\t\tHelp: \"Counter of APIServices which are marked as unavailable broken down by APIService name and reason.\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go", "type": "add", "edit_start_line_idx": 23 }
/* * * Copyright 2017 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package naming import ( "context" "errors" "fmt" "net" "strconv" "time" "google.golang.org/grpc/grpclog" ) const ( defaultPort = "443" defaultFreq = time.Minute * 30 ) var ( errMissingAddr = errors.New("missing address") errWatcherClose = errors.New("watcher has been closed") lookupHost = net.DefaultResolver.LookupHost lookupSRV = net.DefaultResolver.LookupSRV ) // NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and // create watchers that poll the DNS server using the frequency set by freq. func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { return &dnsResolver{freq: freq}, nil } // NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create // watchers that poll the DNS server using the default frequency defined by defaultFreq. func NewDNSResolver() (Resolver, error) { return NewDNSResolverWithFreq(defaultFreq) } // dnsResolver handles name resolution for names following the DNS scheme type dnsResolver struct { // frequency of polling the DNS server that the watchers created by this resolver will use. freq time.Duration } // formatIP returns ok = false if addr is not a valid textual representation of an IP address. // If addr is an IPv4 address, return the addr and ok = true. // If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { return "", false } if ip.To4() != nil { return addr, true } return "[" + addr + "]", true } // parseTarget takes the user input target string, returns formatted host and port info. // If target doesn't specify a port, set the port to be the defaultPort. // If target is in IPv6 format and host-name is enclosed in square brackets, brackets // are stripped when setting the host. // examples: // target: "www.google.com" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" returns host: "ipv4-host", port: "80" // target: "[ipv6-host]" returns host: "ipv6-host", port: "443" // target: ":80" returns host: "localhost", port: "80" // target: ":" returns host: "localhost", port: "443" func parseTarget(target string) (host, port string, err error) { if target == "" { return "", "", errMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address return target, defaultPort, nil } if host, port, err := net.SplitHostPort(target); err == nil { // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. host = "localhost" } if port == "" { // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. 
port = defaultPort } return host, port, nil } if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { // target doesn't have port return host, port, nil } return "", "", fmt.Errorf("invalid target address %v", target) } // Resolve creates a watcher that watches the name resolution of the target. func (r *dnsResolver) Resolve(target string) (Watcher, error) { host, port, err := parseTarget(target) if err != nil { return nil, err } if net.ParseIP(host) != nil { ipWatcher := &ipWatcher{ updateChan: make(chan *Update, 1), } host, _ = formatIP(host) ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} return ipWatcher, nil } ctx, cancel := context.WithCancel(context.Background()) return &dnsWatcher{ r: r, host: host, port: port, ctx: ctx, cancel: cancel, t: time.NewTimer(0), }, nil } // dnsWatcher watches for the name resolution update for a specific target type dnsWatcher struct { r *dnsResolver host string port string // The latest resolved address set curAddrs map[string]*Update ctx context.Context cancel context.CancelFunc t *time.Timer } // ipWatcher watches for the name resolution update for an IP address. type ipWatcher struct { updateChan chan *Update } // Next returns the address resolution Update for the target. For IP address, // the resolution is itself, thus polling name server is unnecessary. Therefore, // Next() will return an Update the first time it is called, and will be blocked // for all following calls as no Update exists until watcher is closed. func (i *ipWatcher) Next() ([]*Update, error) { u, ok := <-i.updateChan if !ok { return nil, errWatcherClose } return []*Update{u}, nil } // Close closes the ipWatcher. func (i *ipWatcher) Close() { close(i.updateChan) } // AddressType indicates the address type returned by name resolution. type AddressType uint8 const ( // Backend indicates the server is a backend server. Backend AddressType = iota // GRPCLB indicates the server is a grpclb load balancer. GRPCLB ) // AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The // name resolver used by the grpclb balancer is required to provide this type of metadata in // its address updates. type AddrMetadataGRPCLB struct { // AddrType is the type of server (grpc load balancer or backend). AddrType AddressType // ServerName is the name of the grpc load balancer. Used for authentication. 
ServerName string } // compileUpdate compares the old resolved addresses and newly resolved addresses, // and generates an update list func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { var res []*Update for a, u := range w.curAddrs { if _, ok := newAddrs[a]; !ok { u.Op = Delete res = append(res, u) } } for a, u := range newAddrs { if _, ok := w.curAddrs[a]; !ok { res = append(res, u) } } return res } func (w *dnsWatcher) lookupSRV() map[string]*Update { newAddrs := make(map[string]*Update) _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) if err != nil { grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) return nil } for _, s := range srvs { lbAddrs, err := lookupHost(w.ctx, s.Target) if err != nil { grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err) continue } for _, a := range lbAddrs { a, ok := formatIP(a) if !ok { grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) continue } addr := a + ":" + strconv.Itoa(int(s.Port)) newAddrs[addr] = &Update{Addr: addr, Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} } } return newAddrs } func (w *dnsWatcher) lookupHost() map[string]*Update { newAddrs := make(map[string]*Update) addrs, err := lookupHost(w.ctx, w.host) if err != nil { grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) return nil } for _, a := range addrs { a, ok := formatIP(a) if !ok { grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) continue } addr := a + ":" + w.port newAddrs[addr] = &Update{Addr: addr} } return newAddrs } func (w *dnsWatcher) lookup() []*Update { newAddrs := w.lookupSRV() if newAddrs == nil { // If failed to get any balancer address (either no corresponding SRV for the // target, or caused by failure during resolution/parsing of the balancer target), // return any A record info available. newAddrs = w.lookupHost() } result := w.compileUpdate(newAddrs) w.curAddrs = newAddrs return result } // Next returns the resolved address update(delta) for the target. If there's no // change, it will sleep for 30 mins and try to resolve again after that. func (w *dnsWatcher) Next() ([]*Update, error) { for { select { case <-w.ctx.Done(): return nil, errWatcherClose case <-w.t.C: } result := w.lookup() // Next lookup should happen after an interval defined by w.r.freq. w.t.Reset(w.r.freq) if len(result) > 0 { return result, nil } } } func (w *dnsWatcher) Close() { w.cancel() }
vendor/google.golang.org/grpc/naming/dns_resolver.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0008305965457111597, 0.00021511409431695938, 0.000163305172463879, 0.0001704813912510872, 0.000144500081660226 ]
{ "id": 8, "code_window": [ "import (\n", "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tunavailableCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n", "\t\t\tName: \"aggregator_unavailable_apiservice_count\",\n", "\t\t\tHelp: \"Counter of APIServices which are marked as unavailable broken down by APIService name and reason.\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go", "type": "add", "edit_start_line_idx": 23 }
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	goflag "flag"
	"math/rand"
	"os"
	"time"

	"github.com/spf13/pflag"

	cliflag "k8s.io/component-base/cli/flag"
	"k8s.io/component-base/logs"
	"k8s.io/kubernetes/cmd/kube-proxy/app"
	_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
	_ "k8s.io/kubernetes/pkg/version/prometheus"        // for version metric registration
)

func main() {
	rand.Seed(time.Now().UnixNano())

	command := app.NewProxyCommand()

	// TODO: once we switch everything over to Cobra commands, we can go back to calling
	// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
	// normalize func and add the go flag set by hand.
	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	// utilflag.InitFlags()
	logs.InitLogs()
	defer logs.FlushLogs()

	if err := command.Execute(); err != nil {
		os.Exit(1)
	}
}
cmd/kube-proxy/proxy.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0004064363311044872, 0.00021198859030846506, 0.00016638300439808518, 0.00017453136388212442, 0.00008702525519765913 ]
{ "id": 8, "code_window": [ "import (\n", "\t\"k8s.io/component-base/metrics\"\n", "\t\"k8s.io/component-base/metrics/legacyregistry\"\n", ")\n", "\n", "var (\n", "\tunavailableCounter = metrics.NewCounterVec(\n", "\t\t&metrics.CounterOpts{\n", "\t\t\tName: \"aggregator_unavailable_apiservice_count\",\n", "\t\t\tHelp: \"Counter of APIServices which are marked as unavailable broken down by APIService name and reason.\",\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/kube-aggregator/pkg/controllers/status/metrics.go", "type": "add", "edit_start_line_idx": 23 }
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package network

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/network/scale"

	"github.com/onsi/ginkgo"
)

var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
	defer ginkgo.GinkgoRecover()
	var (
		ns string
	)
	f := framework.NewDefaultFramework("ingress-scale")

	ginkgo.BeforeEach(func() {
		ns = f.Namespace.Name
	})

	ginkgo.Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() {
		var (
			scaleFramework *scale.IngressScaleFramework
		)

		ginkgo.BeforeEach(func() {
			framework.SkipUnlessProviderIs("gce", "gke")

			scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
			if err := scaleFramework.PrepareScaleTest(); err != nil {
				e2elog.Failf("Unexpected error while preparing ingress scale test: %v", err)
			}
		})

		ginkgo.AfterEach(func() {
			if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
				e2elog.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
			}
		})

		ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
			if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
				e2elog.Failf("Unexpected error while running ingress scale test: %v", errs)
			}
		})
	})
})
test/e2e/network/ingress_scale.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0002768010599538684, 0.0001974850456463173, 0.00016609483282081783, 0.00017378688789904118, 0.00004045430614496581 ]
{ "id": 9, "code_window": [ "\t// Maximum burst of requests to token URL before limiting.\n", "\ttokenURLBurst = 3\n", ")\n", "\n", "var (\n", "\tgetTokenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/legacy-cloud-providers/gce/token_source.go", "type": "add", "edit_start_line_idx": 43 }
// +build !providerless /* Copyright 2015 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package gce import ( "context" "encoding/json" "net/http" "strings" "time" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" "k8s.io/client-go/util/flowcontrol" "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" ) const ( // Max QPS to allow through to the token URL. tokenURLQPS = .05 // back off to once every 20 seconds when failing // Maximum burst of requests to token URL before limiting. tokenURLBurst = 3 ) var ( getTokenCounter = metrics.NewCounter( &metrics.CounterOpts{ Name: "get_token_count", Help: "Counter of total Token() requests to the alternate token source", StabilityLevel: metrics.ALPHA, }, ) getTokenFailCounter = metrics.NewCounter( &metrics.CounterOpts{ Name: "get_token_fail_count", Help: "Counter of failed Token() requests to the alternate token source", StabilityLevel: metrics.ALPHA, }, ) ) func init() { legacyregistry.MustRegister(getTokenCounter) legacyregistry.MustRegister(getTokenFailCounter) } // AltTokenSource is the structure holding the data for the functionality needed to generates tokens type AltTokenSource struct { oauthClient *http.Client tokenURL string tokenBody string throttle flowcontrol.RateLimiter } // Token returns a token which may be used for authentication func (a *AltTokenSource) Token() (*oauth2.Token, error) { a.throttle.Accept() getTokenCounter.Inc() t, err := a.token() if err != nil { getTokenFailCounter.Inc() } return t, err } func (a *AltTokenSource) token() (*oauth2.Token, error) { req, err := http.NewRequest("POST", a.tokenURL, strings.NewReader(a.tokenBody)) if err != nil { return nil, err } res, err := a.oauthClient.Do(req) if err != nil { return nil, err } defer res.Body.Close() if err := googleapi.CheckResponse(res); err != nil { return nil, err } var tok struct { AccessToken string `json:"accessToken"` ExpireTime time.Time `json:"expireTime"` } if err := json.NewDecoder(res.Body).Decode(&tok); err != nil { return nil, err } return &oauth2.Token{ AccessToken: tok.AccessToken, Expiry: tok.ExpireTime, }, nil } // NewAltTokenSource constructs a new alternate token source for generating tokens. func NewAltTokenSource(tokenURL, tokenBody string) oauth2.TokenSource { client := oauth2.NewClient(context.Background(), google.ComputeTokenSource("")) a := &AltTokenSource{ oauthClient: client, tokenURL: tokenURL, tokenBody: tokenBody, throttle: flowcontrol.NewTokenBucketRateLimiter(tokenURLQPS, tokenURLBurst), } return oauth2.ReuseTokenSource(nil, a) }
staging/src/k8s.io/legacy-cloud-providers/gce/token_source.go
1
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.9989137649536133, 0.32314345240592957, 0.00016781732847448438, 0.0006252736784517765, 0.45165643095970154 ]
{ "id": 9, "code_window": [ "\t// Maximum burst of requests to token URL before limiting.\n", "\ttokenURLBurst = 3\n", ")\n", "\n", "var (\n", "\tgetTokenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/legacy-cloud-providers/gce/token_source.go", "type": "add", "edit_start_line_idx": 43 }
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package stats import ( "time" "k8s.io/klog" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" "github.com/prometheus/client_golang/prometheus" ) // NodeResourceMetric describes a metric for the node type NodeResourceMetric struct { Name string Description string ValueFn func(stats.NodeStats) (*float64, time.Time) } func (n *NodeResourceMetric) desc() *prometheus.Desc { return prometheus.NewDesc(n.Name, n.Description, []string{}, nil) } // ContainerResourceMetric describes a metric for containers type ContainerResourceMetric struct { Name string Description string ValueFn func(stats.ContainerStats) (*float64, time.Time) } func (n *ContainerResourceMetric) desc() *prometheus.Desc { return prometheus.NewDesc(n.Name, n.Description, []string{"container", "pod", "namespace"}, nil) } // ResourceMetricsConfig specifies which metrics to collect and export type ResourceMetricsConfig struct { NodeMetrics []NodeResourceMetric ContainerMetrics []ContainerResourceMetric } // NewPrometheusResourceMetricCollector returns a prometheus.Collector which exports resource metrics func NewPrometheusResourceMetricCollector(provider SummaryProvider, config ResourceMetricsConfig) prometheus.Collector { return &resourceMetricCollector{ provider: provider, config: config, errors: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "scrape_error", Help: "1 if there was an error while getting container metrics, 0 otherwise", }), } } type resourceMetricCollector struct { provider SummaryProvider config ResourceMetricsConfig errors prometheus.Gauge } var _ prometheus.Collector = &resourceMetricCollector{} // Describe implements prometheus.Collector func (rc *resourceMetricCollector) Describe(ch chan<- *prometheus.Desc) { rc.errors.Describe(ch) for _, metric := range rc.config.NodeMetrics { ch <- metric.desc() } for _, metric := range rc.config.ContainerMetrics { ch <- metric.desc() } } // Collect implements prometheus.Collector // Since new containers are frequently created and removed, using the prometheus.Gauge Collector would // leak metric collectors for containers or pods that no longer exist. Instead, implement // prometheus.Collector in a way that only collects metrics for active containers. 
func (rc *resourceMetricCollector) Collect(ch chan<- prometheus.Metric) { rc.errors.Set(0) defer rc.errors.Collect(ch) summary, err := rc.provider.GetCPUAndMemoryStats() if err != nil { rc.errors.Set(1) klog.Warningf("Error getting summary for resourceMetric prometheus endpoint: %v", err) return } for _, metric := range rc.config.NodeMetrics { if value, timestamp := metric.ValueFn(summary.Node); value != nil { ch <- prometheus.NewMetricWithTimestamp(timestamp, prometheus.MustNewConstMetric(metric.desc(), prometheus.GaugeValue, *value)) } } for _, pod := range summary.Pods { for _, container := range pod.Containers { for _, metric := range rc.config.ContainerMetrics { if value, timestamp := metric.ValueFn(container); value != nil { ch <- prometheus.NewMetricWithTimestamp(timestamp, prometheus.MustNewConstMetric(metric.desc(), prometheus.GaugeValue, *value, container.Name, pod.PodRef.Name, pod.PodRef.Namespace)) } } } } }
pkg/kubelet/server/stats/prometheus_resource_metrics.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.0028267253655940294, 0.0004030287964269519, 0.00016860355390235782, 0.00017699407180771232, 0.0007310169748961926 ]
{ "id": 9, "code_window": [ "\t// Maximum burst of requests to token URL before limiting.\n", "\ttokenURLBurst = 3\n", ")\n", "\n", "var (\n", "\tgetTokenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/legacy-cloud-providers/gce/token_source.go", "type": "add", "edit_start_line_idx": 43 }
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pluginwatcher import ( "errors" "fmt" "net" "os" "sync" "time" "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/klog" registerapi "k8s.io/kubernetes/pkg/kubelet/apis/pluginregistration/v1" "k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache" v1beta1 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta1" v1beta2 "k8s.io/kubernetes/pkg/kubelet/pluginmanager/pluginwatcher/example_plugin_apis/v1beta2" ) // examplePlugin is a sample plugin to work with plugin watcher type examplePlugin struct { grpcServer *grpc.Server wg sync.WaitGroup registrationStatus chan registerapi.RegistrationStatus // for testing endpoint string // for testing pluginName string pluginType string versions []string } type pluginServiceV1Beta1 struct { server *examplePlugin } func (s *pluginServiceV1Beta1) GetExampleInfo(ctx context.Context, rqt *v1beta1.ExampleRequest) (*v1beta1.ExampleResponse, error) { klog.Infof("GetExampleInfo v1beta1field: %s", rqt.V1Beta1Field) return &v1beta1.ExampleResponse{}, nil } func (s *pluginServiceV1Beta1) RegisterService() { v1beta1.RegisterExampleServer(s.server.grpcServer, s) } type pluginServiceV1Beta2 struct { server *examplePlugin } func (s *pluginServiceV1Beta2) GetExampleInfo(ctx context.Context, rqt *v1beta2.ExampleRequest) (*v1beta2.ExampleResponse, error) { klog.Infof("GetExampleInfo v1beta2_field: %s", rqt.V1Beta2Field) return &v1beta2.ExampleResponse{}, nil } func (s *pluginServiceV1Beta2) RegisterService() { v1beta2.RegisterExampleServer(s.server.grpcServer, s) } // NewExamplePlugin returns an initialized examplePlugin instance func NewExamplePlugin() *examplePlugin { return &examplePlugin{} } // NewTestExamplePlugin returns an initialized examplePlugin instance for testing func NewTestExamplePlugin(pluginName string, pluginType string, endpoint string, advertisedVersions ...string) *examplePlugin { return &examplePlugin{ pluginName: pluginName, pluginType: pluginType, endpoint: endpoint, versions: advertisedVersions, registrationStatus: make(chan registerapi.RegistrationStatus), } } // GetPluginInfo returns a PluginInfo object func GetPluginInfo(plugin *examplePlugin, foundInDeprecatedDir bool) cache.PluginInfo { return cache.PluginInfo{ SocketPath: plugin.endpoint, FoundInDeprecatedDir: foundInDeprecatedDir, } } // GetInfo is the RPC invoked by plugin watcher func (e *examplePlugin) GetInfo(ctx context.Context, req *registerapi.InfoRequest) (*registerapi.PluginInfo, error) { return &registerapi.PluginInfo{ Type: e.pluginType, Name: e.pluginName, Endpoint: e.endpoint, SupportedVersions: e.versions, }, nil } func (e *examplePlugin) NotifyRegistrationStatus(ctx context.Context, status *registerapi.RegistrationStatus) (*registerapi.RegistrationStatusResponse, error) { klog.Errorf("Registration is: %v\n", status) if e.registrationStatus != nil { e.registrationStatus <- *status } return &registerapi.RegistrationStatusResponse{}, nil } // Serve starts a pluginwatcher server and 
one or more of the plugin services func (e *examplePlugin) Serve(services ...string) error { klog.Infof("starting example server at: %s\n", e.endpoint) lis, err := net.Listen("unix", e.endpoint) if err != nil { return err } klog.Infof("example server started at: %s\n", e.endpoint) e.grpcServer = grpc.NewServer() // Registers kubelet plugin watcher api. registerapi.RegisterRegistrationServer(e.grpcServer, e) for _, service := range services { switch service { case "v1beta1": v1beta1 := &pluginServiceV1Beta1{server: e} v1beta1.RegisterService() case "v1beta2": v1beta2 := &pluginServiceV1Beta2{server: e} v1beta2.RegisterService() default: return fmt.Errorf("unsupported service: '%s'", service) } } // Starts service e.wg.Add(1) go func() { defer e.wg.Done() // Blocking call to accept incoming connections. if err := e.grpcServer.Serve(lis); err != nil { klog.Errorf("example server stopped serving: %v", err) } }() return nil } func (e *examplePlugin) Stop() error { klog.Infof("Stopping example server at: %s\n", e.endpoint) e.grpcServer.Stop() c := make(chan struct{}) go func() { defer close(c) e.wg.Wait() }() select { case <-c: break case <-time.After(time.Second): return errors.New("timed out on waiting for stop completion") } if err := os.Remove(e.endpoint); err != nil && !os.IsNotExist(err) { return err } return nil }
pkg/kubelet/pluginmanager/pluginwatcher/example_plugin.go
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.00017808051779866219, 0.0001709374482743442, 0.00016713909280952066, 0.00017065476276911795, 0.000003071943410759559 ]
{ "id": 9, "code_window": [ "\t// Maximum burst of requests to token URL before limiting.\n", "\ttokenURLBurst = 3\n", ")\n", "\n", "var (\n", "\tgetTokenCounter = metrics.NewCounter(\n", "\t\t&metrics.CounterOpts{\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "/*\n", " * By default, all the following metrics are defined as falling under\n", " * ALPHA stability level https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/20190404-kubernetes-control-plane-metrics-stability.md#stability-classes)\n", " *\n", " * Promoting the stability level of the metric is a responsibility of the component owner, since it\n", " * involves explicitly acknowledging support for the metric across multiple releases, in accordance with\n", " * the metric stability policy.\n", " */\n" ], "file_path": "staging/src/k8s.io/legacy-cloud-providers/gce/token_source.go", "type": "add", "edit_start_line_idx": 43 }
# Workqueue Example

This example demonstrates how to write a controller which follows the states of watched resources.

It demonstrates how to:
 * combine the workqueue with a cache to a full controller
 * synchronize the controller on startup

The example is based on https://git.k8s.io/community/contributors/devel/sig-api-machinery/controllers.md.

## Running

```
# if outside of the cluster
go run *.go -kubeconfig=/my/config -logtostderr=true
```
staging/src/k8s.io/client-go/examples/workqueue/README.md
0
https://github.com/kubernetes/kubernetes/commit/4e5d906c4d008f914b0ede26ea91533d6343dec5
[ 0.000170767234521918, 0.0001697797270026058, 0.00016879223403520882, 0.0001697797270026058, 9.875002433545887e-7 ]
{ "id": 0, "code_window": [ "The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it found, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins:\n", "* `network-plugin-dir`: Kubelet probes this directory for plugins on startup\n", "* `network-plugin`: The network plugin to use from `network-plugin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply \"cni\".\n", "\n", "### Exec\n", "\n", "Place plugins in `network-plugin-dir/plugin-name/plugin-name`, i.e if you have a bridge plugin and `network-plugin-dir` is `/usr/lib/kubernetes`, you'd place the bridge plugin executable at `/usr/lib/kubernetes/bridge/bridge`. See [this comment](../../pkg/kubelet/network/exec/exec.go) for more details.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "## Network Plugin Requirements\n", "\n", "Besides providing the [`NetworkPlugin` interface](../../pkg/kubelet/network/plugins.go) to configure and clean up pod networking, the plugin may also need specific support for kube-proxy. The iptables proxy obviously depends on iptables, and the plugin may need to ensure that container traffic is made available to iptables. For example, if the plugin connects containers to a Linux bridge, the plugin must set the `net/bridge/bridge-nf-call-iptables` sysctl to `1` to ensure that the iptables proxy functions correctly. If the plugin does not use a Linux bridge (but instead something like Open vSwitch or some other mechanism) it should ensure container traffic is appropriately routed for the proxy.\n", "\n", "By default if no kubelet network plugin is specified, the `noop` plugin is used, which sets `net/bridge/bridge-nf-call-iptables=1` to ensure simple configurations (like docker with a bridge) work correctly with the iptables proxy.\n", "\n" ], "file_path": "docs/admin/network-plugins.md", "type": "add", "edit_start_line_idx": 44 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import ( "fmt" "net" "strings" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/validation" ) const DefaultPluginName = "kubernetes.io/no-op" // Called when the node's Pod CIDR is known when using the // controller manager's --allocate-node-cidrs=true option const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once // before any other methods are called. Init(host Host) error // Called on various events like: // NET_PLUGIN_EVENT_POD_CIDR_CHANGE Event(name string, details map[string]interface{}) // Name returns the plugin's name. This will be used when searching // for a plugin by name, e.g. Name() string // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the container Status(namespace string, name string, podInfraContainerID kubecontainer.DockerID) (*PodNetworkStatus, error) } // PodNetworkStatus stores the network status of a pod (currently just the primary IP address) // This struct represents version "v1beta1" type PodNetworkStatus struct { unversioned.TypeMeta `json:",inline"` // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - // - kube expects to be reachable across the cluster // - service endpoints are constructed with // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) IP net.IP `json:"ip" description:"Primary IP address of the pod"` } // Host is an interface that plugins can use to access the kubelet. type Host interface { // Get the pod structure by its name, namespace GetPodByName(namespace, name string) (*api.Pod, bool) // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) GetRuntime() kubecontainer.Runtime } // InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host) (NetworkPlugin, error) { if networkPluginName == "" { // default to the no_op plugin plug := &noopNetworkPlugin{} return plug, nil } pluginMap := map[string]NetworkPlugin{} allErrs := []error{} for _, plugin := range plugins { name := plugin.Name() if !validation.IsQualifiedName(name) { allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %#v", plugin)) continue } if _, found := pluginMap[name]; found { allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) continue } pluginMap[name] = plugin } chosenPlugin := pluginMap[networkPluginName] if chosenPlugin != nil { err := chosenPlugin.Init(host) if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { glog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) } func UnescapePluginName(in string) string { return strings.Replace(in, "~", "/", -1) } type noopNetworkPlugin struct { } func (plugin *noopNetworkPlugin) Init(host Host) error { return nil } func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) { } func (plugin *noopNetworkPlugin) Name() string { return DefaultPluginName } func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*PodNetworkStatus, error) { return nil, nil }
pkg/kubelet/network/plugins.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.007316843140870333, 0.002165114041417837, 0.00016838600276969373, 0.0006923949113115668, 0.0024780442472547293 ]
{ "id": 0, "code_window": [ "The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it found, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins:\n", "* `network-plugin-dir`: Kubelet probes this directory for plugins on startup\n", "* `network-plugin`: The network plugin to use from `network-plugin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply \"cni\".\n", "\n", "### Exec\n", "\n", "Place plugins in `network-plugin-dir/plugin-name/plugin-name`, i.e if you have a bridge plugin and `network-plugin-dir` is `/usr/lib/kubernetes`, you'd place the bridge plugin executable at `/usr/lib/kubernetes/bridge/bridge`. See [this comment](../../pkg/kubelet/network/exec/exec.go) for more details.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "## Network Plugin Requirements\n", "\n", "Besides providing the [`NetworkPlugin` interface](../../pkg/kubelet/network/plugins.go) to configure and clean up pod networking, the plugin may also need specific support for kube-proxy. The iptables proxy obviously depends on iptables, and the plugin may need to ensure that container traffic is made available to iptables. For example, if the plugin connects containers to a Linux bridge, the plugin must set the `net/bridge/bridge-nf-call-iptables` sysctl to `1` to ensure that the iptables proxy functions correctly. If the plugin does not use a Linux bridge (but instead something like Open vSwitch or some other mechanism) it should ensure container traffic is appropriately routed for the proxy.\n", "\n", "By default if no kubelet network plugin is specified, the `noop` plugin is used, which sets `net/bridge/bridge-nf-call-iptables=1` to ensure simple configurations (like docker with a bridge) work correctly with the iptables proxy.\n", "\n" ], "file_path": "docs/admin/network-plugins.md", "type": "add", "edit_start_line_idx": 44 }
foo.bar[*].baz
Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-596
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0001642012648517266, 0.0001642012648517266, 0.0001642012648517266, 0.0001642012648517266, 0 ]
{ "id": 0, "code_window": [ "The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it found, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins:\n", "* `network-plugin-dir`: Kubelet probes this directory for plugins on startup\n", "* `network-plugin`: The network plugin to use from `network-plugin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply \"cni\".\n", "\n", "### Exec\n", "\n", "Place plugins in `network-plugin-dir/plugin-name/plugin-name`, i.e if you have a bridge plugin and `network-plugin-dir` is `/usr/lib/kubernetes`, you'd place the bridge plugin executable at `/usr/lib/kubernetes/bridge/bridge`. See [this comment](../../pkg/kubelet/network/exec/exec.go) for more details.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "## Network Plugin Requirements\n", "\n", "Besides providing the [`NetworkPlugin` interface](../../pkg/kubelet/network/plugins.go) to configure and clean up pod networking, the plugin may also need specific support for kube-proxy. The iptables proxy obviously depends on iptables, and the plugin may need to ensure that container traffic is made available to iptables. For example, if the plugin connects containers to a Linux bridge, the plugin must set the `net/bridge/bridge-nf-call-iptables` sysctl to `1` to ensure that the iptables proxy functions correctly. If the plugin does not use a Linux bridge (but instead something like Open vSwitch or some other mechanism) it should ensure container traffic is appropriately routed for the proxy.\n", "\n", "By default if no kubelet network plugin is specified, the `noop` plugin is used, which sets `net/bridge/bridge-nf-call-iptables=1` to ensure simple configurations (like docker with a bridge) work correctly with the iptables proxy.\n", "\n" ], "file_path": "docs/admin/network-plugins.md", "type": "add", "edit_start_line_idx": 44 }
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ssh import ( "crypto/aes" "crypto/cipher" "crypto/rc4" "crypto/subtle" "encoding/binary" "errors" "fmt" "hash" "io" ) const ( packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC // indicates implementations SHOULD be able to handle larger packet sizes, but then // waffles on about reasonable limits. // // OpenSSH caps their maxPacket at 256kB so we choose to do // the same. maxPacket is also used to ensure that uint32 // length fields do not overflow, so it should remain well // below 4G. maxPacket = 256 * 1024 ) // noneCipher implements cipher.Stream and provides no encryption. It is used // by the transport before the first key-exchange. type noneCipher struct{} func (c noneCipher) XORKeyStream(dst, src []byte) { copy(dst, src) } func newAESCTR(key, iv []byte) (cipher.Stream, error) { c, err := aes.NewCipher(key) if err != nil { return nil, err } return cipher.NewCTR(c, iv), nil } func newRC4(key, iv []byte) (cipher.Stream, error) { return rc4.NewCipher(key) } type streamCipherMode struct { keySize int ivSize int skip int createFunc func(key, iv []byte) (cipher.Stream, error) } func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) { if len(key) < c.keySize { panic("ssh: key length too small for cipher") } if len(iv) < c.ivSize { panic("ssh: iv too small for cipher") } stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize]) if err != nil { return nil, err } var streamDump []byte if c.skip > 0 { streamDump = make([]byte, 512) } for remainingToDump := c.skip; remainingToDump > 0; { dumpThisTime := remainingToDump if dumpThisTime > len(streamDump) { dumpThisTime = len(streamDump) } stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) remainingToDump -= dumpThisTime } return stream, nil } // cipherModes documents properties of supported ciphers. Ciphers not included // are not supported and will not be negotiated, even if explicitly requested in // ClientConfig.Crypto.Ciphers. var cipherModes = map[string]*streamCipherMode{ // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms // are defined in the order specified in the RFC. "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR}, "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR}, "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR}, // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. // They are defined in the order specified in the RFC. "arcfour128": {16, 0, 1536, newRC4}, "arcfour256": {32, 0, 1536, newRC4}, // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and // RC4) has problems with weak keys, and should be used with caution." // RFC4345 introduces improved versions of Arcfour. "arcfour": {16, 0, 0, newRC4}, // AES-GCM is not a stream cipher, so it is constructed with a // special case. If we add any more non-stream ciphers, we // should invest a cleaner way to do this. gcmCipherID: {16, 12, 0, nil}, // insecure cipher, see http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf // uncomment below to enable it. 
// aes128cbcID: {16, aes.BlockSize, 0, nil}, } // prefixLen is the length of the packet prefix that contains the packet length // and number of padding bytes. const prefixLen = 5 // streamPacketCipher is a packetCipher using a stream cipher. type streamPacketCipher struct { mac hash.Hash cipher cipher.Stream // The following members are to avoid per-packet allocations. prefix [prefixLen]byte seqNumBytes [4]byte padding [2 * packetSizeMultiple]byte packetData []byte macResult []byte } // readPacket reads and decrypt a single packet from the reader argument. func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { if _, err := io.ReadFull(r, s.prefix[:]); err != nil { return nil, err } s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) length := binary.BigEndian.Uint32(s.prefix[0:4]) paddingLength := uint32(s.prefix[4]) var macSize uint32 if s.mac != nil { s.mac.Reset() binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) s.mac.Write(s.seqNumBytes[:]) s.mac.Write(s.prefix[:]) macSize = uint32(s.mac.Size()) } if length <= paddingLength+1 { return nil, errors.New("ssh: invalid packet length, packet too small") } if length > maxPacket { return nil, errors.New("ssh: invalid packet length, packet too large") } // the maxPacket check above ensures that length-1+macSize // does not overflow. if uint32(cap(s.packetData)) < length-1+macSize { s.packetData = make([]byte, length-1+macSize) } else { s.packetData = s.packetData[:length-1+macSize] } if _, err := io.ReadFull(r, s.packetData); err != nil { return nil, err } mac := s.packetData[length-1:] data := s.packetData[:length-1] s.cipher.XORKeyStream(data, data) if s.mac != nil { s.mac.Write(data) s.macResult = s.mac.Sum(s.macResult[:0]) if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { return nil, errors.New("ssh: MAC failure") } } return s.packetData[:length-paddingLength-1], nil } // writePacket encrypts and sends a packet of data to the writer argument func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { if len(packet) > maxPacket { return errors.New("ssh: packet too large") } paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple if paddingLength < 4 { paddingLength += packetSizeMultiple } length := len(packet) + 1 + paddingLength binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) s.prefix[4] = byte(paddingLength) padding := s.padding[:paddingLength] if _, err := io.ReadFull(rand, padding); err != nil { return err } if s.mac != nil { s.mac.Reset() binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) s.mac.Write(s.seqNumBytes[:]) s.mac.Write(s.prefix[:]) s.mac.Write(packet) s.mac.Write(padding) } s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) s.cipher.XORKeyStream(packet, packet) s.cipher.XORKeyStream(padding, padding) if _, err := w.Write(s.prefix[:]); err != nil { return err } if _, err := w.Write(packet); err != nil { return err } if _, err := w.Write(padding); err != nil { return err } if s.mac != nil { s.macResult = s.mac.Sum(s.macResult[:0]) if _, err := w.Write(s.macResult); err != nil { return err } } return nil } type gcmCipher struct { aead cipher.AEAD prefix [4]byte iv []byte buf []byte } func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) { c, err := aes.NewCipher(key) if err != nil { return nil, err } aead, err := cipher.NewGCM(c) if err != nil { return nil, err } return &gcmCipher{ aead: aead, iv: iv, }, nil } const gcmTagSize = 16 func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand 
io.Reader, packet []byte) error { // Pad out to multiple of 16 bytes. This is different from the // stream cipher because that encrypts the length too. padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) if padding < 4 { padding += packetSizeMultiple } length := uint32(len(packet) + int(padding) + 1) binary.BigEndian.PutUint32(c.prefix[:], length) if _, err := w.Write(c.prefix[:]); err != nil { return err } if cap(c.buf) < int(length) { c.buf = make([]byte, length) } else { c.buf = c.buf[:length] } c.buf[0] = padding copy(c.buf[1:], packet) if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { return err } c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) if _, err := w.Write(c.buf); err != nil { return err } c.incIV() return nil } func (c *gcmCipher) incIV() { for i := 4 + 7; i >= 4; i-- { c.iv[i]++ if c.iv[i] != 0 { break } } } func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { if _, err := io.ReadFull(r, c.prefix[:]); err != nil { return nil, err } length := binary.BigEndian.Uint32(c.prefix[:]) if length > maxPacket { return nil, errors.New("ssh: max packet length exceeded.") } if cap(c.buf) < int(length+gcmTagSize) { c.buf = make([]byte, length+gcmTagSize) } else { c.buf = c.buf[:length+gcmTagSize] } if _, err := io.ReadFull(r, c.buf); err != nil { return nil, err } plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) if err != nil { return nil, err } c.incIV() padding := plain[0] if padding < 4 || padding >= 20 { return nil, fmt.Errorf("ssh: illegal padding %d", padding) } if int(padding+1) >= len(plain) { return nil, fmt.Errorf("ssh: padding %d too large", padding) } plain = plain[1 : length-uint32(padding)] return plain, nil } // cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 type cbcCipher struct { mac hash.Hash decrypter cipher.BlockMode encrypter cipher.BlockMode // The following members are to avoid per-packet allocations. seqNumBytes [4]byte packetData []byte macResult []byte } func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { c, err := aes.NewCipher(key) if err != nil { return nil, err } return &cbcCipher{ mac: macModes[algs.MAC].new(macKey), decrypter: cipher.NewCBCDecrypter(c, iv), encrypter: cipher.NewCBCEncrypter(c, iv), packetData: make([]byte, 1024), }, nil } func maxUInt32(a, b int) uint32 { if a > b { return uint32(a) } return uint32(b) } const ( cbcMinPacketSizeMultiple = 8 cbcMinPacketSize = 16 cbcMinPaddingSize = 4 ) func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { blockSize := c.decrypter.BlockSize() // Read the header, which will include some of the subsequent data in the // case of block ciphers - this is copied back to the payload later. // How many bytes of payload/padding will be read with this first read. firstBlockLength := (prefixLen + blockSize - 1) / blockSize * blockSize firstBlock := c.packetData[:firstBlockLength] if _, err := io.ReadFull(r, firstBlock); err != nil { return nil, err } c.decrypter.CryptBlocks(firstBlock, firstBlock) length := binary.BigEndian.Uint32(firstBlock[:4]) if length > maxPacket { return nil, errors.New("ssh: packet too large") } if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { // The minimum size of a packet is 16 (or the cipher block size, whichever // is larger) bytes. 
return nil, errors.New("ssh: packet too small") } // The length of the packet (including the length field but not the MAC) must // be a multiple of the block size or 8, whichever is larger. if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { return nil, errors.New("ssh: invalid packet length multiple") } paddingLength := uint32(firstBlock[4]) if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { return nil, errors.New("ssh: invalid packet length") } var macSize uint32 if c.mac != nil { macSize = uint32(c.mac.Size()) } // Positions within the c.packetData buffer: macStart := 4 + length paddingStart := macStart - paddingLength // Entire packet size, starting before length, ending at end of mac. entirePacketSize := macStart + macSize // Ensure c.packetData is large enough for the entire packet data. if uint32(cap(c.packetData)) < entirePacketSize { // Still need to upsize and copy, but this should be rare at runtime, only // on upsizing the packetData buffer. c.packetData = make([]byte, entirePacketSize) copy(c.packetData, firstBlock) } else { c.packetData = c.packetData[:entirePacketSize] } if _, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { return nil, err } remainingCrypted := c.packetData[firstBlockLength:macStart] c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) mac := c.packetData[macStart:] if c.mac != nil { c.mac.Reset() binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) c.mac.Write(c.seqNumBytes[:]) c.mac.Write(c.packetData[:macStart]) c.macResult = c.mac.Sum(c.macResult[:0]) if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { return nil, errors.New("ssh: MAC failure") } } return c.packetData[prefixLen:paddingStart], nil } func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) // Length of encrypted portion of the packet (header, payload, padding). // Enforce minimum padding and packet size. encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) // Enforce block size. encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize length := encLength - 4 paddingLength := int(length) - (1 + len(packet)) var macSize uint32 if c.mac != nil { macSize = uint32(c.mac.Size()) } // Overall buffer contains: header, payload, padding, mac. // Space for the MAC is reserved in the capacity but not the slice length. bufferSize := encLength + macSize if uint32(cap(c.packetData)) < bufferSize { c.packetData = make([]byte, encLength, bufferSize) } else { c.packetData = c.packetData[:encLength] } p := c.packetData // Packet header. binary.BigEndian.PutUint32(p, length) p = p[4:] p[0] = byte(paddingLength) // Payload. p = p[1:] copy(p, packet) // Padding. p = p[len(packet):] if _, err := io.ReadFull(rand, p); err != nil { return err } if c.mac != nil { c.mac.Reset() binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) c.mac.Write(c.seqNumBytes[:]) c.mac.Write(c.packetData) // The MAC is now appended into the capacity reserved for it earlier. c.packetData = c.mac.Sum(c.packetData) } c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) if _, err := w.Write(c.packetData); err != nil { return err } return nil }
Godeps/_workspace/src/golang.org/x/crypto/ssh/cipher.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0001769522059476003, 0.0001660607085796073, 0.0001640342961763963, 0.00016561755910515785, 0.000002011061951634474 ]
{ "id": 0, "code_window": [ "The kubelet has a single default network plugin, and a default network common to the entire cluster. It probes for plugins when it starts up, remembers what it found, and executes the selected plugin at appropriate times in the pod lifecycle (this is only true for docker, as rkt manages its own CNI plugins). There are two Kubelet command line parameters to keep in mind when using plugins:\n", "* `network-plugin-dir`: Kubelet probes this directory for plugins on startup\n", "* `network-plugin`: The network plugin to use from `network-plugin-dir`. It must match the name reported by a plugin probed from the plugin directory. For CNI plugins, this is simply \"cni\".\n", "\n", "### Exec\n", "\n", "Place plugins in `network-plugin-dir/plugin-name/plugin-name`, i.e if you have a bridge plugin and `network-plugin-dir` is `/usr/lib/kubernetes`, you'd place the bridge plugin executable at `/usr/lib/kubernetes/bridge/bridge`. See [this comment](../../pkg/kubelet/network/exec/exec.go) for more details.\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "## Network Plugin Requirements\n", "\n", "Besides providing the [`NetworkPlugin` interface](../../pkg/kubelet/network/plugins.go) to configure and clean up pod networking, the plugin may also need specific support for kube-proxy. The iptables proxy obviously depends on iptables, and the plugin may need to ensure that container traffic is made available to iptables. For example, if the plugin connects containers to a Linux bridge, the plugin must set the `net/bridge/bridge-nf-call-iptables` sysctl to `1` to ensure that the iptables proxy functions correctly. If the plugin does not use a Linux bridge (but instead something like Open vSwitch or some other mechanism) it should ensure container traffic is appropriately routed for the proxy.\n", "\n", "By default if no kubelet network plugin is specified, the `noop` plugin is used, which sets `net/bridge/bridge-nf-call-iptables=1` to ensure simple configurations (like docker with a bridge) work correctly with the iptables proxy.\n", "\n" ], "file_path": "docs/admin/network-plugins.md", "type": "add", "edit_start_line_idx": 44 }
sort(numbers)
Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-175
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00016971291915979236, 0.00016971291915979236, 0.00016971291915979236, 0.00016971291915979236, 0 ]
{ "id": 1, "code_window": [ "\t\"github.com/golang/glog\"\n", "\t\"k8s.io/kubernetes/pkg/api\"\n", "\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n", "\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n", "\tutilerrors \"k8s.io/kubernetes/pkg/util/errors\"\n", "\t\"k8s.io/kubernetes/pkg/util/validation\"\n", ")\n", "\n", "const DefaultPluginName = \"kubernetes.io/no-op\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilexec \"k8s.io/kubernetes/pkg/util/exec\"\n", "\tutilsysctl \"k8s.io/kubernetes/pkg/util/sysctl\"\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 30 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import ( "fmt" "net" "strings" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/validation" ) const DefaultPluginName = "kubernetes.io/no-op" // Called when the node's Pod CIDR is known when using the // controller manager's --allocate-node-cidrs=true option const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once // before any other methods are called. Init(host Host) error // Called on various events like: // NET_PLUGIN_EVENT_POD_CIDR_CHANGE Event(name string, details map[string]interface{}) // Name returns the plugin's name. This will be used when searching // for a plugin by name, e.g. Name() string // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the container Status(namespace string, name string, podInfraContainerID kubecontainer.DockerID) (*PodNetworkStatus, error) } // PodNetworkStatus stores the network status of a pod (currently just the primary IP address) // This struct represents version "v1beta1" type PodNetworkStatus struct { unversioned.TypeMeta `json:",inline"` // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - // - kube expects to be reachable across the cluster // - service endpoints are constructed with // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) IP net.IP `json:"ip" description:"Primary IP address of the pod"` } // Host is an interface that plugins can use to access the kubelet. type Host interface { // Get the pod structure by its name, namespace GetPodByName(namespace, name string) (*api.Pod, bool) // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) GetRuntime() kubecontainer.Runtime } // InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host) (NetworkPlugin, error) { if networkPluginName == "" { // default to the no_op plugin plug := &noopNetworkPlugin{} return plug, nil } pluginMap := map[string]NetworkPlugin{} allErrs := []error{} for _, plugin := range plugins { name := plugin.Name() if !validation.IsQualifiedName(name) { allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %#v", plugin)) continue } if _, found := pluginMap[name]; found { allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) continue } pluginMap[name] = plugin } chosenPlugin := pluginMap[networkPluginName] if chosenPlugin != nil { err := chosenPlugin.Init(host) if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { glog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) } func UnescapePluginName(in string) string { return strings.Replace(in, "~", "/", -1) } type noopNetworkPlugin struct { } func (plugin *noopNetworkPlugin) Init(host Host) error { return nil } func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) { } func (plugin *noopNetworkPlugin) Name() string { return DefaultPluginName } func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*PodNetworkStatus, error) { return nil, nil }
pkg/kubelet/network/plugins.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.9986500144004822, 0.13326495885849, 0.0001667855103733018, 0.0002942286664620042, 0.32791343331336975 ]
{ "id": 1, "code_window": [ "\t\"github.com/golang/glog\"\n", "\t\"k8s.io/kubernetes/pkg/api\"\n", "\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n", "\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n", "\tutilerrors \"k8s.io/kubernetes/pkg/util/errors\"\n", "\t\"k8s.io/kubernetes/pkg/util/validation\"\n", ")\n", "\n", "const DefaultPluginName = \"kubernetes.io/no-op\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilexec \"k8s.io/kubernetes/pkg/util/exec\"\n", "\tutilsysctl \"k8s.io/kubernetes/pkg/util/sysctl\"\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 30 }
`[0, 1, 2]`[1]
Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-406
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00016908331599552184, 0.00016908331599552184, 0.00016908331599552184, 0.00016908331599552184, 0 ]
{ "id": 1, "code_window": [ "\t\"github.com/golang/glog\"\n", "\t\"k8s.io/kubernetes/pkg/api\"\n", "\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n", "\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n", "\tutilerrors \"k8s.io/kubernetes/pkg/util/errors\"\n", "\t\"k8s.io/kubernetes/pkg/util/validation\"\n", ")\n", "\n", "const DefaultPluginName = \"kubernetes.io/no-op\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilexec \"k8s.io/kubernetes/pkg/util/exec\"\n", "\tutilsysctl \"k8s.io/kubernetes/pkg/util/sysctl\"\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 30 }
/* * * Copyright 2015, Google Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * Neither the name of Google Inc. nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ // Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries // to perform unary, client streaming, server streaming and full duplex RPCs. // // It interacts with the route guide service whose definition can be found in proto/route_guide.proto. package main import ( "flag" "io" "math/rand" "time" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/credentials" pb "google.golang.org/grpc/examples/route_guide/routeguide" "google.golang.org/grpc/grpclog" ) var ( tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") caFile = flag.String("ca_file", "testdata/ca.pem", "The file containning the CA root cert file") serverAddr = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port") serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") ) // printFeature gets the feature for the given point. func printFeature(client pb.RouteGuideClient, point *pb.Point) { grpclog.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) feature, err := client.GetFeature(context.Background(), point) if err != nil { grpclog.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) } grpclog.Println(feature) } // printFeatures lists all the features within the given bounding Rectangle. func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { grpclog.Printf("Looking for features within %v", rect) stream, err := client.ListFeatures(context.Background(), rect) if err != nil { grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } for { feature, err := stream.Recv() if err == io.EOF { break } if err != nil { grpclog.Fatalf("%v.ListFeatures(_) = _, %v", client, err) } grpclog.Println(feature) } } // runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. 
func runRecordRoute(client pb.RouteGuideClient) { // Create a random number of random points r := rand.New(rand.NewSource(time.Now().UnixNano())) pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points var points []*pb.Point for i := 0; i < pointCount; i++ { points = append(points, randomPoint(r)) } grpclog.Printf("Traversing %d points.", len(points)) stream, err := client.RecordRoute(context.Background()) if err != nil { grpclog.Fatalf("%v.RecordRoute(_) = _, %v", client, err) } for _, point := range points { if err := stream.Send(point); err != nil { grpclog.Fatalf("%v.Send(%v) = %v", stream, point, err) } } reply, err := stream.CloseAndRecv() if err != nil { grpclog.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) } grpclog.Printf("Route summary: %v", reply) } // runRouteChat receives a sequence of route notes, while sending notes for various locations. func runRouteChat(client pb.RouteGuideClient) { notes := []*pb.RouteNote{ {&pb.Point{0, 1}, "First message"}, {&pb.Point{0, 2}, "Second message"}, {&pb.Point{0, 3}, "Third message"}, {&pb.Point{0, 1}, "Fourth message"}, {&pb.Point{0, 2}, "Fifth message"}, {&pb.Point{0, 3}, "Sixth message"}, } stream, err := client.RouteChat(context.Background()) if err != nil { grpclog.Fatalf("%v.RouteChat(_) = _, %v", client, err) } waitc := make(chan struct{}) go func() { for { in, err := stream.Recv() if err == io.EOF { // read done. close(waitc) return } if err != nil { grpclog.Fatalf("Failed to receive a note : %v", err) } grpclog.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) } }() for _, note := range notes { if err := stream.Send(note); err != nil { grpclog.Fatalf("Failed to send a note: %v", err) } } stream.CloseSend() <-waitc } func randomPoint(r *rand.Rand) *pb.Point { lat := (r.Int31n(180) - 90) * 1e7 long := (r.Int31n(360) - 180) * 1e7 return &pb.Point{lat, long} } func main() { flag.Parse() var opts []grpc.DialOption if *tls { var sn string if *serverHostOverride != "" { sn = *serverHostOverride } var creds credentials.TransportAuthenticator if *caFile != "" { var err error creds, err = credentials.NewClientTLSFromFile(*caFile, sn) if err != nil { grpclog.Fatalf("Failed to create TLS credentials %v", err) } } else { creds = credentials.NewClientTLSFromCert(nil, sn) } opts = append(opts, grpc.WithTransportCredentials(creds)) } else { opts = append(opts, grpc.WithInsecure()) } conn, err := grpc.Dial(*serverAddr, opts...) if err != nil { grpclog.Fatalf("fail to dial: %v", err) } defer conn.Close() client := pb.NewRouteGuideClient(conn) // Looking for a valid feature printFeature(client, &pb.Point{409146138, -746188906}) // Feature missing. printFeature(client, &pb.Point{0, 0}) // Looking for features between 40, -75 and 42, -73. printFeatures(client, &pb.Rectangle{&pb.Point{400000000, -750000000}, &pb.Point{420000000, -730000000}}) // RecordRoute runRecordRoute(client) // RouteChat runRouteChat(client) }
Godeps/_workspace/src/google.golang.org/grpc/examples/route_guide/client/client.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00017598587146494538, 0.00016940172645263374, 0.00015935543342493474, 0.0001689917698968202, 0.000004386447471915744 ]
{ "id": 1, "code_window": [ "\t\"github.com/golang/glog\"\n", "\t\"k8s.io/kubernetes/pkg/api\"\n", "\t\"k8s.io/kubernetes/pkg/api/unversioned\"\n", "\tkubecontainer \"k8s.io/kubernetes/pkg/kubelet/container\"\n", "\tutilerrors \"k8s.io/kubernetes/pkg/util/errors\"\n", "\t\"k8s.io/kubernetes/pkg/util/validation\"\n", ")\n", "\n", "const DefaultPluginName = \"kubernetes.io/no-op\"\n", "\n" ], "labels": [ "keep", "keep", "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\tutilexec \"k8s.io/kubernetes/pkg/util/exec\"\n", "\tutilsysctl \"k8s.io/kubernetes/pkg/util/sysctl\"\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 30 }
type(`null`)
Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-133
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00016955456521827728, 0.00016955456521827728, 0.00016955456521827728, 0.00016955456521827728, 0 ]
{ "id": 2, "code_window": [ "\tif networkPluginName == \"\" {\n", "\t\t// default to the no_op plugin\n", "\t\tplug := &noopNetworkPlugin{}\n", "\t\treturn plug, nil\n", "\t}\n", "\n", "\tpluginMap := map[string]NetworkPlugin{}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tif err := plug.Init(host); err != nil {\n", "\t\t\treturn nil, err\n", "\t\t}\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 95 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import ( "fmt" "net" "strings" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/validation" ) const DefaultPluginName = "kubernetes.io/no-op" // Called when the node's Pod CIDR is known when using the // controller manager's --allocate-node-cidrs=true option const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once // before any other methods are called. Init(host Host) error // Called on various events like: // NET_PLUGIN_EVENT_POD_CIDR_CHANGE Event(name string, details map[string]interface{}) // Name returns the plugin's name. This will be used when searching // for a plugin by name, e.g. Name() string // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the container Status(namespace string, name string, podInfraContainerID kubecontainer.DockerID) (*PodNetworkStatus, error) } // PodNetworkStatus stores the network status of a pod (currently just the primary IP address) // This struct represents version "v1beta1" type PodNetworkStatus struct { unversioned.TypeMeta `json:",inline"` // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - // - kube expects to be reachable across the cluster // - service endpoints are constructed with // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) IP net.IP `json:"ip" description:"Primary IP address of the pod"` } // Host is an interface that plugins can use to access the kubelet. type Host interface { // Get the pod structure by its name, namespace GetPodByName(namespace, name string) (*api.Pod, bool) // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) GetRuntime() kubecontainer.Runtime } // InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host) (NetworkPlugin, error) { if networkPluginName == "" { // default to the no_op plugin plug := &noopNetworkPlugin{} return plug, nil } pluginMap := map[string]NetworkPlugin{} allErrs := []error{} for _, plugin := range plugins { name := plugin.Name() if !validation.IsQualifiedName(name) { allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %#v", plugin)) continue } if _, found := pluginMap[name]; found { allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) continue } pluginMap[name] = plugin } chosenPlugin := pluginMap[networkPluginName] if chosenPlugin != nil { err := chosenPlugin.Init(host) if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { glog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) } func UnescapePluginName(in string) string { return strings.Replace(in, "~", "/", -1) } type noopNetworkPlugin struct { } func (plugin *noopNetworkPlugin) Init(host Host) error { return nil } func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) { } func (plugin *noopNetworkPlugin) Name() string { return DefaultPluginName } func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*PodNetworkStatus, error) { return nil, nil }
pkg/kubelet/network/plugins.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.9991638660430908, 0.18859489262104034, 0.000162530763191171, 0.0016367345815524459, 0.3891122043132782 ]
{ "id": 2, "code_window": [ "\tif networkPluginName == \"\" {\n", "\t\t// default to the no_op plugin\n", "\t\tplug := &noopNetworkPlugin{}\n", "\t\treturn plug, nil\n", "\t}\n", "\n", "\tpluginMap := map[string]NetworkPlugin{}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tif err := plug.Init(host); err != nil {\n", "\t\t\treturn nil, err\n", "\t\t}\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 95 }
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package kubelet import ( "k8s.io/kubernetes/pkg/api" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/types" ) // fakePodWorkers runs sync pod function in serial, so we can have // deterministic behaviour in testing. type fakePodWorkers struct { syncPodFn syncPodFnType cache kubecontainer.Cache t TestingInterface } func (f *fakePodWorkers) UpdatePod(pod *api.Pod, mirrorPod *api.Pod, updateType kubetypes.SyncPodType, updateComplete func()) { status, err := f.cache.Get(pod.UID) if err != nil { f.t.Errorf("Unexpected error: %v", err) } if err := f.syncPodFn(pod, mirrorPod, status, kubetypes.SyncPodUpdate); err != nil { f.t.Errorf("Unexpected error: %v", err) } } func (f *fakePodWorkers) ForgetNonExistingPodWorkers(desiredPods map[types.UID]empty) {} func (f *fakePodWorkers) ForgetWorker(uid types.UID) {} type TestingInterface interface { Errorf(format string, args ...interface{}) }
pkg/kubelet/fake_pod_workers.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0003249230794608593, 0.00020005762053187937, 0.0001686347386566922, 0.0001781956380000338, 0.00005596866685664281 ]
{ "id": 2, "code_window": [ "\tif networkPluginName == \"\" {\n", "\t\t// default to the no_op plugin\n", "\t\tplug := &noopNetworkPlugin{}\n", "\t\treturn plug, nil\n", "\t}\n", "\n", "\tpluginMap := map[string]NetworkPlugin{}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tif err := plug.Init(host); err != nil {\n", "\t\t\treturn nil, err\n", "\t\t}\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 95 }
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package scheduler import ( "fmt" "strings" "sync" "time" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "github.com/golang/glog" ) var ( _ = SystemModeler(&FakeModeler{}) _ = SystemModeler(&SimpleModeler{}) ) // ExtendedPodLister: SimpleModeler needs to be able to check for a pod's // existence in addition to listing the pods. type ExtendedPodLister interface { algorithm.PodLister Exists(pod *api.Pod) (bool, error) } // actionLocker implements lockedAction (so the fake and SimpleModeler can both // use it) type actionLocker struct { sync.Mutex } // LockedAction serializes calls of whatever is passed as 'do'. func (a *actionLocker) LockedAction(do func()) { a.Lock() defer a.Unlock() do() } // FakeModeler implements the SystemModeler interface. type FakeModeler struct { AssumePodFunc func(pod *api.Pod) ForgetPodFunc func(pod *api.Pod) ForgetPodByKeyFunc func(key string) actionLocker } // AssumePod calls the function variable if it is not nil. func (f *FakeModeler) AssumePod(pod *api.Pod) { if f.AssumePodFunc != nil { f.AssumePodFunc(pod) } } // ForgetPod calls the function variable if it is not nil. func (f *FakeModeler) ForgetPod(pod *api.Pod) { if f.ForgetPodFunc != nil { f.ForgetPodFunc(pod) } } // ForgetPodByKey calls the function variable if it is not nil. func (f *FakeModeler) ForgetPodByKey(key string) { if f.ForgetPodFunc != nil { f.ForgetPodByKeyFunc(key) } } // SimpleModeler implements the SystemModeler interface with a timed pod cache. type SimpleModeler struct { queuedPods ExtendedPodLister scheduledPods ExtendedPodLister // assumedPods holds the pods that we think we've scheduled, but that // haven't yet shown up in the scheduledPods variable. // TODO: periodically clear this. assumedPods *cache.StoreToPodLister actionLocker } // NewSimpleModeler returns a new SimpleModeler. // queuedPods: a PodLister that will return pods that have not been scheduled yet. // scheduledPods: a PodLister that will return pods that we know for sure have been scheduled. func NewSimpleModeler(queuedPods, scheduledPods ExtendedPodLister) *SimpleModeler { return &SimpleModeler{ queuedPods: queuedPods, scheduledPods: scheduledPods, assumedPods: &cache.StoreToPodLister{ Store: cache.NewTTLStore(cache.MetaNamespaceKeyFunc, 30*time.Second), }, } } func (s *SimpleModeler) AssumePod(pod *api.Pod) { s.assumedPods.Add(pod) } func (s *SimpleModeler) ForgetPod(pod *api.Pod) { s.assumedPods.Delete(pod) } func (s *SimpleModeler) ForgetPodByKey(key string) { s.assumedPods.Delete(cache.ExplicitKey(key)) } // Extract names for readable logging. 
func podNames(pods []*api.Pod) []string { out := make([]string, len(pods)) for i := range pods { out[i] = fmt.Sprintf("'%v/%v (%v)'", pods[i].Namespace, pods[i].Name, pods[i].UID) } return out } func (s *SimpleModeler) listPods(selector labels.Selector) (pods []*api.Pod, err error) { assumed, err := s.assumedPods.List(selector) if err != nil { return nil, err } // Since the assumed list will be short, just check every one. // Goal here is to stop making assumptions about a pod once it shows // up in one of these other lists. for _, pod := range assumed { qExist, err := s.queuedPods.Exists(pod) if err != nil { return nil, err } if qExist { s.assumedPods.Store.Delete(pod) continue } sExist, err := s.scheduledPods.Exists(pod) if err != nil { return nil, err } if sExist { s.assumedPods.Store.Delete(pod) continue } } scheduled, err := s.scheduledPods.List(selector) if err != nil { return nil, err } // Listing purges the ttl cache and re-gets, in case we deleted any entries. assumed, err = s.assumedPods.List(selector) if err != nil { return nil, err } if len(assumed) == 0 { return scheduled, nil } glog.V(2).Infof( "listing pods: [%v] assumed to exist in addition to %v known pods.", strings.Join(podNames(assumed), ","), len(scheduled), ) return append(scheduled, assumed...), nil } // PodLister returns a PodLister that will list pods that we think we have scheduled in // addition to pods that we know have been scheduled. func (s *SimpleModeler) PodLister() algorithm.PodLister { return simpleModelerPods{s} } // simpleModelerPods is an adaptor so that SimpleModeler can be a PodLister. type simpleModelerPods struct { simpleModeler *SimpleModeler } // List returns pods known and assumed to exist. func (s simpleModelerPods) List(selector labels.Selector) (pods []*api.Pod, err error) { s.simpleModeler.LockedAction( func() { pods, err = s.simpleModeler.listPods(selector) }) return }
plugin/pkg/scheduler/modeler.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00017945455329027027, 0.00017060534446500242, 0.0001650168269407004, 0.00016933135339058936, 0.000004347819867689395 ]
{ "id": 2, "code_window": [ "\tif networkPluginName == \"\" {\n", "\t\t// default to the no_op plugin\n", "\t\tplug := &noopNetworkPlugin{}\n", "\t\treturn plug, nil\n", "\t}\n", "\n", "\tpluginMap := map[string]NetworkPlugin{}\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tif err := plug.Init(host); err != nil {\n", "\t\t\treturn nil, err\n", "\t\t}\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 95 }
// Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package blowfish // getNextWord returns the next big-endian uint32 value from the byte slice // at the given position in a circular manner, updating the position. func getNextWord(b []byte, pos *int) uint32 { var w uint32 j := *pos for i := 0; i < 4; i++ { w = w<<8 | uint32(b[j]) j++ if j >= len(b) { j = 0 } } *pos = j return w } // ExpandKey performs a key expansion on the given *Cipher. Specifically, it // performs the Blowfish algorithm's key schedule which sets up the *Cipher's // pi and substitution tables for calls to Encrypt. This is used, primarily, // by the bcrypt package to reuse the Blowfish key schedule during its // set up. It's unlikely that you need to use this directly. func ExpandKey(key []byte, c *Cipher) { j := 0 for i := 0; i < 18; i++ { // Using inlined getNextWord for performance. var d uint32 for k := 0; k < 4; k++ { d = d<<8 | uint32(key[j]) j++ if j >= len(key) { j = 0 } } c.p[i] ^= d } var l, r uint32 for i := 0; i < 18; i += 2 { l, r = encryptBlock(l, r, c) c.p[i], c.p[i+1] = l, r } for i := 0; i < 256; i += 2 { l, r = encryptBlock(l, r, c) c.s0[i], c.s0[i+1] = l, r } for i := 0; i < 256; i += 2 { l, r = encryptBlock(l, r, c) c.s1[i], c.s1[i+1] = l, r } for i := 0; i < 256; i += 2 { l, r = encryptBlock(l, r, c) c.s2[i], c.s2[i+1] = l, r } for i := 0; i < 256; i += 2 { l, r = encryptBlock(l, r, c) c.s3[i], c.s3[i+1] = l, r } } // This is similar to ExpandKey, but folds the salt during the key // schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero // salt passed in, reusing ExpandKey turns out to be a place of inefficiency // and specializing it here is useful. 
func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { j := 0 for i := 0; i < 18; i++ { c.p[i] ^= getNextWord(key, &j) } j = 0 var l, r uint32 for i := 0; i < 18; i += 2 { l ^= getNextWord(salt, &j) r ^= getNextWord(salt, &j) l, r = encryptBlock(l, r, c) c.p[i], c.p[i+1] = l, r } for i := 0; i < 256; i += 2 { l ^= getNextWord(salt, &j) r ^= getNextWord(salt, &j) l, r = encryptBlock(l, r, c) c.s0[i], c.s0[i+1] = l, r } for i := 0; i < 256; i += 2 { l ^= getNextWord(salt, &j) r ^= getNextWord(salt, &j) l, r = encryptBlock(l, r, c) c.s1[i], c.s1[i+1] = l, r } for i := 0; i < 256; i += 2 { l ^= getNextWord(salt, &j) r ^= getNextWord(salt, &j) l, r = encryptBlock(l, r, c) c.s2[i], c.s2[i+1] = l, r } for i := 0; i < 256; i += 2 { l ^= getNextWord(salt, &j) r ^= getNextWord(salt, &j) l, r = encryptBlock(l, r, c) c.s3[i], c.s3[i+1] = l, r } } func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { xl, xr := l, r xl ^= c.p[0] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] xr ^= c.p[17] return xr, xl } func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { xl, xr := l, r xl ^= c.p[17] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + 
c.s3[byte(xr)] ^ c.p[7] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] xr ^= c.p[0] return xr, xl }
Godeps/_workspace/src/golang.org/x/crypto/blowfish/block.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00018758230726234615, 0.00017294807184953243, 0.0001644431904423982, 0.00017301170737482607, 0.000005752152446802938 ]
{ "id": 3, "code_window": [ "type noopNetworkPlugin struct {\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Init(host Host) error {\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "const sysctlBridgeCallIptables = \"net/bridge/bridge-nf-call-iptables\"\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 137 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import ( "fmt" "net" "strings" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/validation" ) const DefaultPluginName = "kubernetes.io/no-op" // Called when the node's Pod CIDR is known when using the // controller manager's --allocate-node-cidrs=true option const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once // before any other methods are called. Init(host Host) error // Called on various events like: // NET_PLUGIN_EVENT_POD_CIDR_CHANGE Event(name string, details map[string]interface{}) // Name returns the plugin's name. This will be used when searching // for a plugin by name, e.g. Name() string // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the container Status(namespace string, name string, podInfraContainerID kubecontainer.DockerID) (*PodNetworkStatus, error) } // PodNetworkStatus stores the network status of a pod (currently just the primary IP address) // This struct represents version "v1beta1" type PodNetworkStatus struct { unversioned.TypeMeta `json:",inline"` // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - // - kube expects to be reachable across the cluster // - service endpoints are constructed with // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) IP net.IP `json:"ip" description:"Primary IP address of the pod"` } // Host is an interface that plugins can use to access the kubelet. type Host interface { // Get the pod structure by its name, namespace GetPodByName(namespace, name string) (*api.Pod, bool) // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) GetRuntime() kubecontainer.Runtime } // InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host) (NetworkPlugin, error) { if networkPluginName == "" { // default to the no_op plugin plug := &noopNetworkPlugin{} return plug, nil } pluginMap := map[string]NetworkPlugin{} allErrs := []error{} for _, plugin := range plugins { name := plugin.Name() if !validation.IsQualifiedName(name) { allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %#v", plugin)) continue } if _, found := pluginMap[name]; found { allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) continue } pluginMap[name] = plugin } chosenPlugin := pluginMap[networkPluginName] if chosenPlugin != nil { err := chosenPlugin.Init(host) if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { glog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) } func UnescapePluginName(in string) string { return strings.Replace(in, "~", "/", -1) } type noopNetworkPlugin struct { } func (plugin *noopNetworkPlugin) Init(host Host) error { return nil } func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) { } func (plugin *noopNetworkPlugin) Name() string { return DefaultPluginName } func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*PodNetworkStatus, error) { return nil, nil }
pkg/kubelet/network/plugins.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.996862530708313, 0.37664228677749634, 0.00016643060371279716, 0.004916411824524403, 0.4767621159553528 ]
{ "id": 3, "code_window": [ "type noopNetworkPlugin struct {\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Init(host Host) error {\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "const sysctlBridgeCallIptables = \"net/bridge/bridge-nf-call-iptables\"\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 137 }
// Package extensions provides information and interaction with the // different extensions available for the OpenStack Identity service. package extensions
Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/identity/v2/extensions/doc.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00016783860337454826, 0.00016783860337454826, 0.00016783860337454826, 0.00016783860337454826, 0 ]
{ "id": 3, "code_window": [ "type noopNetworkPlugin struct {\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Init(host Host) error {\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "const sysctlBridgeCallIptables = \"net/bridge/bridge-nf-call-iptables\"\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 137 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cache import ( "sync" ) // Queue is exactly like a Store, but has a Pop() method too. type Queue interface { Store // Pop blocks until it has something to return. Pop() interface{} // AddIfNotPresent adds a value previously // returned by Pop back into the queue as long // as nothing else (presumably more recent) // has since been added. AddIfNotPresent(interface{}) error // Return true if the first batch of items has been popped HasSynced() bool } // FIFO receives adds and updates from a Reflector, and puts them in a queue for // FIFO order processing. If multiple adds/updates of a single item happen while // an item is in the queue before it has been processed, it will only be // processed once, and when it is processed, the most recent version will be // processed. This can't be done with a channel. // // FIFO solves this use case: // * You want to process every object (exactly) once. // * You want to process the most recent version of the object when you process it. // * You do not want to process deleted objects, they should be removed from the queue. // * You do not want to periodically reprocess objects. // Compare with DeltaFIFO for other use cases. type FIFO struct { lock sync.RWMutex cond sync.Cond // We depend on the property that items in the set are in the queue and vice versa. items map[string]interface{} queue []string // populated is true if the first batch of items inserted by Replace() has been populated // or Delete/Add/Update was called first. populated bool // initialPopulationCount is the number of items inserted by the first call of Replace() initialPopulationCount int // keyFunc is used to make the key used for queued item insertion and retrieval, and // should be deterministic. keyFunc KeyFunc } var ( _ = Queue(&FIFO{}) // FIFO is a Queue ) // Return true if an Add/Update/Delete/AddIfNotPresent are called first, // or an Update called first but the first batch of items inserted by Replace() has been popped func (f *FIFO) HasSynced() bool { f.lock.Lock() defer f.lock.Unlock() return f.populated && f.initialPopulationCount == 0 } // Add inserts an item, and puts it in the queue. The item is only enqueued // if it doesn't already exist in the set. func (f *FIFO) Add(obj interface{}) error { id, err := f.keyFunc(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.populated = true if _, exists := f.items[id]; !exists { f.queue = append(f.queue, id) } f.items[id] = obj f.cond.Broadcast() return nil } // AddIfNotPresent inserts an item, and puts it in the queue. If the item is already // present in the set, it is neither enqueued nor added to the set. // // This is useful in a single producer/consumer scenario so that the consumer can // safely retry items without contending with the producer and potentially enqueueing // stale items. 
func (f *FIFO) AddIfNotPresent(obj interface{}) error { id, err := f.keyFunc(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.populated = true if _, exists := f.items[id]; exists { return nil } f.queue = append(f.queue, id) f.items[id] = obj f.cond.Broadcast() return nil } // Update is the same as Add in this implementation. func (f *FIFO) Update(obj interface{}) error { return f.Add(obj) } // Delete removes an item. It doesn't add it to the queue, because // this implementation assumes the consumer only cares about the objects, // not the order in which they were created/added. func (f *FIFO) Delete(obj interface{}) error { id, err := f.keyFunc(obj) if err != nil { return KeyError{obj, err} } f.lock.Lock() defer f.lock.Unlock() f.populated = true delete(f.items, id) return err } // List returns a list of all the items. func (f *FIFO) List() []interface{} { f.lock.RLock() defer f.lock.RUnlock() list := make([]interface{}, 0, len(f.items)) for _, item := range f.items { list = append(list, item) } return list } // ListKeys returns a list of all the keys of the objects currently // in the FIFO. func (f *FIFO) ListKeys() []string { f.lock.RLock() defer f.lock.RUnlock() list := make([]string, 0, len(f.items)) for key := range f.items { list = append(list, key) } return list } // Get returns the requested item, or sets exists=false. func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { key, err := f.keyFunc(obj) if err != nil { return nil, false, KeyError{obj, err} } return f.GetByKey(key) } // GetByKey returns the requested item, or sets exists=false. func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) { f.lock.RLock() defer f.lock.RUnlock() item, exists = f.items[key] return item, exists, nil } // Pop waits until an item is ready and returns it. If multiple items are // ready, they are returned in the order in which they were added/updated. // The item is removed from the queue (and the store) before it is returned, // so if you don't successfully process it, you need to add it back with // AddIfNotPresent(). func (f *FIFO) Pop() interface{} { f.lock.Lock() defer f.lock.Unlock() for { for len(f.queue) == 0 { f.cond.Wait() } id := f.queue[0] f.queue = f.queue[1:] if f.initialPopulationCount > 0 { f.initialPopulationCount-- } item, ok := f.items[id] if !ok { // Item may have been deleted subsequently. continue } delete(f.items, id) return item } } // Replace will delete the contents of 'f', using instead the given map. // 'f' takes ownership of the map, you should not reference the map again // after calling this function. f's queue is reset, too; upon return, it // will contain the items in the map, in no particular order. func (f *FIFO) Replace(list []interface{}, resourceVersion string) error { items := map[string]interface{}{} for _, item := range list { key, err := f.keyFunc(item) if err != nil { return KeyError{item, err} } items[key] = item } f.lock.Lock() defer f.lock.Unlock() if !f.populated { f.populated = true f.initialPopulationCount = len(items) } f.items = items f.queue = f.queue[:0] for id := range items { f.queue = append(f.queue, id) } if len(f.queue) > 0 { f.cond.Broadcast() } return nil } // NewFIFO returns a Store which can be used to queue up items to // process. func NewFIFO(keyFunc KeyFunc) *FIFO { f := &FIFO{ items: map[string]interface{}{}, queue: []string{}, keyFunc: keyFunc, } f.cond.L = &f.lock return f }
pkg/client/cache/fifo.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.012804567813873291, 0.0008270477410405874, 0.00016440414765384048, 0.00022806567722000182, 0.0024048774503171444 ]
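A minimal usage sketch for the FIFO type in the pkg/client/cache/fifo.go record above, assuming the in-tree import path of that era; the task type and key function are illustrative stand-ins, not part of the original file. It shows how repeated Adds of one key collapse into a single queued entry and how Pop returns the latest version.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// task is a stand-in object for this sketch; any type works as long as the
// key function can derive a stable string key from it.
type task struct {
	Name string
	Spec string
}

func main() {
	// cache.KeyFunc is func(obj interface{}) (string, error); key items by name here.
	keyFunc := func(obj interface{}) (string, error) {
		return obj.(*task).Name, nil
	}
	fifo := cache.NewFIFO(keyFunc)

	// Two Adds of the same key before a Pop collapse into one queued entry,
	// and Pop returns the most recent version of the object.
	_ = fifo.Add(&task{Name: "build", Spec: "v1"})
	_ = fifo.Add(&task{Name: "build", Spec: "v2"})

	item := fifo.Pop() // blocks until an item is available
	fmt.Printf("processing %+v\n", item.(*task)) // Spec is "v2"
}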
{ "id": 3, "code_window": [ "type noopNetworkPlugin struct {\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Init(host Host) error {\n" ], "labels": [ "keep", "keep", "add", "keep" ], "after_edit": [ "const sysctlBridgeCallIptables = \"net/bridge/bridge-nf-call-iptables\"\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 137 }
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
	"bytes"
	"net/http"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/unversioned/fake"
)

func TestCreateNamespace(t *testing.T) {
	namespaceObject := &api.Namespace{}
	namespaceObject.Name = "my-namespace"

	f, tf, codec := NewAPIFactory()
	tf.Printer = &testPrinter{}
	tf.Client = &fake.RESTClient{
		Codec: codec,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
			switch p, m := req.URL.Path, req.Method; {
			case p == "/namespaces" && m == "POST":
				return &http.Response{StatusCode: 201, Body: objBody(codec, namespaceObject)}, nil
			default:
				t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
				return nil, nil
			}
		}),
	}
	buf := bytes.NewBuffer([]byte{})
	cmd := NewCmdCreateNamespace(f, buf)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{namespaceObject.Name})
	expectedOutput := "namespace/" + namespaceObject.Name + "\n"
	if buf.String() != expectedOutput {
		// Pass the expected value first and the actual output second, matching the format string.
		t.Errorf("expected output: %s, but got: %s", expectedOutput, buf.String())
	}
}
pkg/kubectl/cmd/create_namespace_test.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0002660772588569671, 0.00019210815662518144, 0.00017164189193863422, 0.00017725182988215238, 0.00003328030652482994 ]
{ "id": 4, "code_window": [ "func (plugin *noopNetworkPlugin) Init(host Host) error {\n", "\treturn nil\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) {\n" ], "labels": [ "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Set bridge-nf-call-iptables=1 to maintain compatibility with older\n", "\t// kubernetes versions to ensure the iptables-based kube proxy functions\n", "\t// correctly. Other plugins are responsible for setting this correctly\n", "\t// depending on whether or not they connect containers to Linux bridges\n", "\t// or use some other mechanism (ie, SDN vswitch).\n", "\n", "\t// Ensure the netfilter module is loaded on kernel >= 3.18; previously\n", "\t// it was built-in.\n", "\tutilexec.New().Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 138 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import ( "fmt" "net" "strings" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/validation" ) const DefaultPluginName = "kubernetes.io/no-op" // Called when the node's Pod CIDR is known when using the // controller manager's --allocate-node-cidrs=true option const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once // before any other methods are called. Init(host Host) error // Called on various events like: // NET_PLUGIN_EVENT_POD_CIDR_CHANGE Event(name string, details map[string]interface{}) // Name returns the plugin's name. This will be used when searching // for a plugin by name, e.g. Name() string // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the container Status(namespace string, name string, podInfraContainerID kubecontainer.DockerID) (*PodNetworkStatus, error) } // PodNetworkStatus stores the network status of a pod (currently just the primary IP address) // This struct represents version "v1beta1" type PodNetworkStatus struct { unversioned.TypeMeta `json:",inline"` // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - // - kube expects to be reachable across the cluster // - service endpoints are constructed with // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) IP net.IP `json:"ip" description:"Primary IP address of the pod"` } // Host is an interface that plugins can use to access the kubelet. type Host interface { // Get the pod structure by its name, namespace GetPodByName(namespace, name string) (*api.Pod, bool) // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) GetRuntime() kubecontainer.Runtime } // InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host) (NetworkPlugin, error) { if networkPluginName == "" { // default to the no_op plugin plug := &noopNetworkPlugin{} return plug, nil } pluginMap := map[string]NetworkPlugin{} allErrs := []error{} for _, plugin := range plugins { name := plugin.Name() if !validation.IsQualifiedName(name) { allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %#v", plugin)) continue } if _, found := pluginMap[name]; found { allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) continue } pluginMap[name] = plugin } chosenPlugin := pluginMap[networkPluginName] if chosenPlugin != nil { err := chosenPlugin.Init(host) if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { glog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) } func UnescapePluginName(in string) string { return strings.Replace(in, "~", "/", -1) } type noopNetworkPlugin struct { } func (plugin *noopNetworkPlugin) Init(host Host) error { return nil } func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) { } func (plugin *noopNetworkPlugin) Name() string { return DefaultPluginName } func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*PodNetworkStatus, error) { return nil, nil }
pkg/kubelet/network/plugins.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.9979191422462463, 0.2495582550764084, 0.00016454483557026833, 0.004113121889531612, 0.40219712257385254 ]
{ "id": 4, "code_window": [ "func (plugin *noopNetworkPlugin) Init(host Host) error {\n", "\treturn nil\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) {\n" ], "labels": [ "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Set bridge-nf-call-iptables=1 to maintain compatibility with older\n", "\t// kubernetes versions to ensure the iptables-based kube proxy functions\n", "\t// correctly. Other plugins are responsible for setting this correctly\n", "\t// depending on whether or not they connect containers to Linux bridges\n", "\t// or use some other mechanism (ie, SDN vswitch).\n", "\n", "\t// Ensure the netfilter module is loaded on kernel >= 3.18; previously\n", "\t// it was built-in.\n", "\tutilexec.New().Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 138 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package etcd import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/registry/cachesize" "k8s.io/kubernetes/pkg/registry/generic" etcdgeneric "k8s.io/kubernetes/pkg/registry/generic/etcd" "k8s.io/kubernetes/pkg/registry/serviceaccount" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/storage" ) type REST struct { *etcdgeneric.Etcd } // NewREST returns a RESTStorage object that will work against service accounts. func NewREST(s storage.Interface, storageDecorator generic.StorageDecorator) *REST { prefix := "/serviceaccounts" newListFunc := func() runtime.Object { return &api.ServiceAccountList{} } storageInterface := storageDecorator( s, cachesize.GetWatchCacheSizeByResource(cachesize.ServiceAccounts), &api.ServiceAccount{}, prefix, serviceaccount.Strategy, newListFunc) store := &etcdgeneric.Etcd{ NewFunc: func() runtime.Object { return &api.ServiceAccount{} }, NewListFunc: newListFunc, KeyRootFunc: func(ctx api.Context) string { return etcdgeneric.NamespaceKeyRootFunc(ctx, prefix) }, KeyFunc: func(ctx api.Context, name string) (string, error) { return etcdgeneric.NamespaceKeyFunc(ctx, prefix, name) }, ObjectNameFunc: func(obj runtime.Object) (string, error) { return obj.(*api.ServiceAccount).Name, nil }, PredicateFunc: func(label labels.Selector, field fields.Selector) generic.Matcher { return serviceaccount.Matcher(label, field) }, QualifiedResource: api.Resource("serviceaccounts"), CreateStrategy: serviceaccount.Strategy, UpdateStrategy: serviceaccount.Strategy, ReturnDeletedObject: true, Storage: storageInterface, } return &REST{store} }
pkg/registry/serviceaccount/etcd/etcd.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0006402599392458797, 0.00024130033852998167, 0.00016645521100144833, 0.0001762328902259469, 0.00016302541189361364 ]
{ "id": 4, "code_window": [ "func (plugin *noopNetworkPlugin) Init(host Host) error {\n", "\treturn nil\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) {\n" ], "labels": [ "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Set bridge-nf-call-iptables=1 to maintain compatibility with older\n", "\t// kubernetes versions to ensure the iptables-based kube proxy functions\n", "\t// correctly. Other plugins are responsible for setting this correctly\n", "\t// depending on whether or not they connect containers to Linux bridges\n", "\t// or use some other mechanism (ie, SDN vswitch).\n", "\n", "\t// Ensure the netfilter module is loaded on kernel >= 3.18; previously\n", "\t// it was built-in.\n", "\tutilexec.New().Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 138 }
{ "kind": "Pod", "apiVersion": "v1", "metadata": { "name": "name", "labels": { "name": "redis-master" } }, "spec": { "containers": [ { "name": "master", "image": "redis", "args": "this is a bad command" } ] } }
pkg/api/validation/testdata/v1/invalidPod1.json
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0001716819970170036, 0.0001715632388368249, 0.00017144446610473096, 0.0001715632388368249, 1.1876545613631606e-7 ]
{ "id": 4, "code_window": [ "func (plugin *noopNetworkPlugin) Init(host Host) error {\n", "\treturn nil\n", "}\n", "\n", "func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) {\n" ], "labels": [ "add", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t// Set bridge-nf-call-iptables=1 to maintain compatibility with older\n", "\t// kubernetes versions to ensure the iptables-based kube proxy functions\n", "\t// correctly. Other plugins are responsible for setting this correctly\n", "\t// depending on whether or not they connect containers to Linux bridges\n", "\t// or use some other mechanism (ie, SDN vswitch).\n", "\n", "\t// Ensure the netfilter module is loaded on kernel >= 3.18; previously\n", "\t// it was built-in.\n", "\tutilexec.New().Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n" ], "file_path": "pkg/kubelet/network/plugins.go", "type": "add", "edit_start_line_idx": 138 }
"VH2&H\\\/"
Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-318
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00017122540157288313, 0.00017122540157288313, 0.00017122540157288313, 0.00017122540157288313, 0 ]
{ "id": 5, "code_window": [ "\t\"encoding/base32\"\n", "\t\"fmt\"\n", "\t\"net\"\n", "\t\"reflect\"\n", "\t\"strconv\"\n", "\t\"strings\"\n", "\t\"sync\"\n", "\t\"time\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"os\"\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "add", "edit_start_line_idx": 28 }
/* Copyright 2015 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package iptables // // NOTE: this needs to be tested in e2e since it uses iptables for everything. // import ( "bytes" "crypto/sha256" "encoding/base32" "fmt" "net" "reflect" "strconv" "strings" "sync" "time" "github.com/coreos/go-semver/semver" "github.com/davecgh/go-spew/spew" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/types" utilexec "k8s.io/kubernetes/pkg/util/exec" utiliptables "k8s.io/kubernetes/pkg/util/iptables" "k8s.io/kubernetes/pkg/util/slice" utilsysctl "k8s.io/kubernetes/pkg/util/sysctl" ) // iptablesMinVersion is the minimum version of iptables for which we will use the Proxier // from this package instead of the userspace Proxier. While most of the // features we need were available earlier, the '-C' flag was added more // recently. We use that indirectly in Ensure* functions, and if we don't // have it, we have to be extra careful about the exact args we feed in being // the same as the args we read back (iptables itself normalizes some args). // This is the "new" Proxier, so we require "new" versions of tools. const iptablesMinVersion = utiliptables.MinCheckVersion // the services chain const kubeServicesChain utiliptables.Chain = "KUBE-SERVICES" // the nodeports chain const kubeNodePortsChain utiliptables.Chain = "KUBE-NODEPORTS" // the kubernetes postrouting chain const kubePostroutingChain utiliptables.Chain = "KUBE-POSTROUTING" // the mark-for-masquerade chain const kubeMarkMasqChain utiliptables.Chain = "KUBE-MARK-MASQ" // the mark we apply to traffic needing SNAT // TODO(thockin): Remove this for v1.3 or v1.4. const oldIptablesMasqueradeMark = "0x4d415351" // IptablesVersioner can query the current iptables version. type IptablesVersioner interface { // returns "X.Y.Z" GetVersion() (string, error) } // KernelCompatTester tests whether the required kernel capabilities are // present to run the iptables proxier. type KernelCompatTester interface { IsCompatible() error } // CanUseIptablesProxier returns true if we should use the iptables Proxier // instead of the "classic" userspace Proxier. This is determined by checking // the iptables version and for the existence of kernel features. It may return // an error if it fails to get the iptables version without error, in which // case it will also return false. func CanUseIptablesProxier(iptver IptablesVersioner, kcompat KernelCompatTester) (bool, error) { minVersion, err := semver.NewVersion(iptablesMinVersion) if err != nil { return false, err } // returns "X.Y.Z" versionString, err := iptver.GetVersion() if err != nil { return false, err } version, err := semver.NewVersion(versionString) if err != nil { return false, err } if version.LessThan(*minVersion) { return false, nil } // Check that the kernel supports what we need. 
if err := kcompat.IsCompatible(); err != nil { return false, err } return true, nil } type LinuxKernelCompatTester struct{} func (lkct LinuxKernelCompatTester) IsCompatible() error { // Check for the required sysctls. We don't care about the value, just // that it exists. If this Proxier is chosen, we'll initialize it as we // need. _, err := utilsysctl.GetSysctl(sysctlRouteLocalnet) return err } const sysctlRouteLocalnet = "net/ipv4/conf/all/route_localnet" const sysctlBridgeCallIptables = "net/bridge/bridge-nf-call-iptables" // internal struct for string service information type serviceInfo struct { clusterIP net.IP port int protocol api.Protocol nodePort int loadBalancerStatus api.LoadBalancerStatus sessionAffinityType api.ServiceAffinity stickyMaxAgeSeconds int externalIPs []string } // returns a new serviceInfo struct func newServiceInfo(service proxy.ServicePortName) *serviceInfo { return &serviceInfo{ sessionAffinityType: api.ServiceAffinityNone, // default stickyMaxAgeSeconds: 180, // TODO: paramaterize this in the API. } } // Proxier is an iptables based proxy for connections between a localhost:lport // and services that provide the actual backends. type Proxier struct { mu sync.Mutex // protects the following fields serviceMap map[proxy.ServicePortName]*serviceInfo endpointsMap map[proxy.ServicePortName][]string portsMap map[localPort]closeable haveReceivedServiceUpdate bool // true once we've seen an OnServiceUpdate event haveReceivedEndpointsUpdate bool // true once we've seen an OnEndpointsUpdate event // These are effectively const and do not need the mutex to be held. syncPeriod time.Duration iptables utiliptables.Interface masqueradeAll bool masqueradeMark string } type localPort struct { desc string ip string port int protocol string } func (lp *localPort) String() string { return fmt.Sprintf("%q (%s:%d/%s)", lp.desc, lp.ip, lp.port, lp.protocol) } type closeable interface { Close() error } // Proxier implements ProxyProvider var _ proxy.ProxyProvider = &Proxier{} // NewProxier returns a new Proxier given an iptables Interface instance. // Because of the iptables logic, it is assumed that there is only a single Proxier active on a machine. // An error will be returned if iptables fails to update or acquire the initial lock. // Once a proxier is created, it will keep iptables up to date in the background and // will not terminate if a particular iptables call fails. func NewProxier(ipt utiliptables.Interface, exec utilexec.Interface, syncPeriod time.Duration, masqueradeAll bool, masqueradeBit int) (*Proxier, error) { // Set the route_localnet sysctl we need for if err := utilsysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) } // Load the module. It's OK if this fails (e.g. the module is not present) // because we'll catch the error on the sysctl, which is what we actually // care about. exec.Command("modprobe", "br-netfilter").CombinedOutput() if err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil { glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIptables, err) } // Generate the masquerade mark to use for SNAT rules. 
if masqueradeBit < 0 || masqueradeBit > 31 { return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit) } masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) return &Proxier{ serviceMap: make(map[proxy.ServicePortName]*serviceInfo), endpointsMap: make(map[proxy.ServicePortName][]string), portsMap: make(map[localPort]closeable), syncPeriod: syncPeriod, iptables: ipt, masqueradeAll: masqueradeAll, masqueradeMark: masqueradeMark, }, nil } // CleanupLeftovers removes all iptables rules and chains created by the Proxier // It returns true if an error was encountered. Errors are logged. func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { // Unlink the services chain. args := []string{ "-m", "comment", "--comment", "kubernetes service portals", "-j", string(kubeServicesChain), } tableChainsWithJumpServices := []struct { table utiliptables.Table chain utiliptables.Chain }{ {utiliptables.TableFilter, utiliptables.ChainOutput}, {utiliptables.TableNAT, utiliptables.ChainOutput}, {utiliptables.TableNAT, utiliptables.ChainPrerouting}, } for _, tc := range tableChainsWithJumpServices { if err := ipt.DeleteRule(tc.table, tc.chain, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true } } } // Unlink the postrouting chain. args = []string{ "-m", "comment", "--comment", "kubernetes postrouting rules", "-j", string(kubePostroutingChain), } if err := ipt.DeleteRule(utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing pure-iptables proxy rule: %v", err) encounteredError = true } } // Flush and remove all of our chains. if iptablesSaveRaw, err := ipt.Save(utiliptables.TableNAT); err != nil { glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableNAT, err) encounteredError = true } else { existingNATChains := getChainLines(utiliptables.TableNAT, iptablesSaveRaw) natChains := bytes.NewBuffer(nil) natRules := bytes.NewBuffer(nil) writeLine(natChains, "*nat") // Start with chains we know we need to remove. for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeNodePortsChain, kubePostroutingChain, kubeMarkMasqChain} { if _, found := existingNATChains[chain]; found { chainString := string(chain) writeLine(natChains, existingNATChains[chain]) // flush writeLine(natRules, "-X", chainString) // delete } } // Hunt for service and endpoint chains. for chain := range existingNATChains { chainString := string(chain) if strings.HasPrefix(chainString, "KUBE-SVC-") || strings.HasPrefix(chainString, "KUBE-SEP-") { writeLine(natChains, existingNATChains[chain]) // flush writeLine(natRules, "-X", chainString) // delete } } writeLine(natRules, "COMMIT") natLines := append(natChains.Bytes(), natRules.Bytes()...) // Write it. err = ipt.Restore(utiliptables.TableNAT, natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableNAT, err) encounteredError = true } } { filterBuf := bytes.NewBuffer(nil) writeLine(filterBuf, "*filter") writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain)) writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain)) writeLine(filterBuf, "COMMIT") // Write it. 
if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) encounteredError = true } } // Clean up the older SNAT rule which was directly in POSTROUTING. // TODO(thockin): Remove this for v1.3 or v1.4. args = []string{ "-m", "comment", "--comment", "kubernetes service traffic requiring SNAT", "-m", "mark", "--mark", oldIptablesMasqueradeMark, "-j", "MASQUERADE", } if err := ipt.DeleteRule(utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing old-style SNAT rule: %v", err) encounteredError = true } } return encounteredError } func (proxier *Proxier) sameConfig(info *serviceInfo, service *api.Service, port *api.ServicePort) bool { if info.protocol != port.Protocol || info.port != port.Port || info.nodePort != port.NodePort { return false } if !info.clusterIP.Equal(net.ParseIP(service.Spec.ClusterIP)) { return false } if !ipsEqual(info.externalIPs, service.Spec.ExternalIPs) { return false } if !api.LoadBalancerStatusEqual(&info.loadBalancerStatus, &service.Status.LoadBalancer) { return false } if info.sessionAffinityType != service.Spec.SessionAffinity { return false } return true } func ipsEqual(lhs, rhs []string) bool { if len(lhs) != len(rhs) { return false } for i := range lhs { if lhs[i] != rhs[i] { return false } } return true } // Sync is called to immediately synchronize the proxier state to iptables func (proxier *Proxier) Sync() { proxier.mu.Lock() defer proxier.mu.Unlock() proxier.syncProxyRules() } // SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return. func (proxier *Proxier) SyncLoop() { t := time.NewTicker(proxier.syncPeriod) defer t.Stop() for { <-t.C glog.V(6).Infof("Periodic sync") proxier.Sync() } } // OnServiceUpdate tracks the active set of service proxies. // They will be synchronized using syncProxyRules() func (proxier *Proxier) OnServiceUpdate(allServices []api.Service) { start := time.Now() defer func() { glog.V(4).Infof("OnServiceUpdate took %v for %d services", time.Since(start), len(allServices)) }() proxier.mu.Lock() defer proxier.mu.Unlock() proxier.haveReceivedServiceUpdate = true activeServices := make(map[proxy.ServicePortName]bool) // use a map as a set for i := range allServices { service := &allServices[i] svcName := types.NamespacedName{ Namespace: service.Namespace, Name: service.Name, } // if ClusterIP is "None" or empty, skip proxying if !api.IsServiceIPSet(service) { glog.V(3).Infof("Skipping service %s due to clusterIP = %q", svcName, service.Spec.ClusterIP) continue } for i := range service.Spec.Ports { servicePort := &service.Spec.Ports[i] serviceName := proxy.ServicePortName{ NamespacedName: svcName, Port: servicePort.Name, } activeServices[serviceName] = true info, exists := proxier.serviceMap[serviceName] if exists && proxier.sameConfig(info, service, servicePort) { // Nothing changed. continue } if exists { // Something changed. 
glog.V(3).Infof("Something changed for service %q: removing it", serviceName) delete(proxier.serviceMap, serviceName) } serviceIP := net.ParseIP(service.Spec.ClusterIP) glog.V(1).Infof("Adding new service %q at %s:%d/%s", serviceName, serviceIP, servicePort.Port, servicePort.Protocol) info = newServiceInfo(serviceName) info.clusterIP = serviceIP info.port = servicePort.Port info.protocol = servicePort.Protocol info.nodePort = servicePort.NodePort info.externalIPs = service.Spec.ExternalIPs // Deep-copy in case the service instance changes info.loadBalancerStatus = *api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) info.sessionAffinityType = service.Spec.SessionAffinity proxier.serviceMap[serviceName] = info glog.V(4).Infof("added serviceInfo(%s): %s", serviceName, spew.Sdump(info)) } } // Remove services missing from the update. for name := range proxier.serviceMap { if !activeServices[name] { glog.V(1).Infof("Removing service %q", name) delete(proxier.serviceMap, name) } } proxier.syncProxyRules() } // OnEndpointsUpdate takes in a slice of updated endpoints. func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []api.Endpoints) { start := time.Now() defer func() { glog.V(4).Infof("OnEndpointsUpdate took %v for %d endpoints", time.Since(start), len(allEndpoints)) }() proxier.mu.Lock() defer proxier.mu.Unlock() proxier.haveReceivedEndpointsUpdate = true activeEndpoints := make(map[proxy.ServicePortName]bool) // use a map as a set // Update endpoints for services. for i := range allEndpoints { svcEndpoints := &allEndpoints[i] // We need to build a map of portname -> all ip:ports for that // portname. Explode Endpoints.Subsets[*] into this structure. portsToEndpoints := map[string][]hostPortPair{} for i := range svcEndpoints.Subsets { ss := &svcEndpoints.Subsets[i] for i := range ss.Ports { port := &ss.Ports[i] for i := range ss.Addresses { addr := &ss.Addresses[i] portsToEndpoints[port.Name] = append(portsToEndpoints[port.Name], hostPortPair{addr.IP, port.Port}) } } } for portname := range portsToEndpoints { svcPort := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: svcEndpoints.Namespace, Name: svcEndpoints.Name}, Port: portname} curEndpoints := proxier.endpointsMap[svcPort] newEndpoints := flattenValidEndpoints(portsToEndpoints[portname]) if len(curEndpoints) != len(newEndpoints) || !slicesEquiv(slice.CopyStrings(curEndpoints), newEndpoints) { glog.V(1).Infof("Setting endpoints for %q to %+v", svcPort, newEndpoints) proxier.endpointsMap[svcPort] = newEndpoints } activeEndpoints[svcPort] = true } } // Remove endpoints missing from the update. for name := range proxier.endpointsMap { if !activeEndpoints[name] { glog.V(2).Infof("Removing endpoints for %q", name) delete(proxier.endpointsMap, name) } } proxier.syncProxyRules() } // used in OnEndpointsUpdate type hostPortPair struct { host string port int } func isValidEndpoint(hpp *hostPortPair) bool { return hpp.host != "" && hpp.port > 0 } // Tests whether two slices are equivalent. This sorts both slices in-place. func slicesEquiv(lhs, rhs []string) bool { if len(lhs) != len(rhs) { return false } if reflect.DeepEqual(slice.SortStrings(lhs), slice.SortStrings(rhs)) { return true } return false } func flattenValidEndpoints(endpoints []hostPortPair) []string { // Convert Endpoint objects into strings for easier use later. 
var result []string for i := range endpoints { hpp := &endpoints[i] if isValidEndpoint(hpp) { result = append(result, net.JoinHostPort(hpp.host, strconv.Itoa(hpp.port))) } else { glog.Warningf("got invalid endpoint: %+v", *hpp) } } return result } // servicePortChainName takes the ServicePortName for a service and // returns the associated iptables chain. This is computed by hashing (sha256) // then encoding to base32 and truncating with the prefix "KUBE-SVC-". We do // this because Iptables Chain Names must be <= 28 chars long, and the longer // they are the harder they are to read. func servicePortChainName(s proxy.ServicePortName, protocol string) utiliptables.Chain { hash := sha256.Sum256([]byte(s.String() + protocol)) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain("KUBE-SVC-" + encoded[:16]) } // This is the same as servicePortChainName but with the endpoint included. func servicePortEndpointChainName(s proxy.ServicePortName, protocol string, endpoint string) utiliptables.Chain { hash := sha256.Sum256([]byte(s.String() + protocol + endpoint)) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain("KUBE-SEP-" + encoded[:16]) } // This is where all of the iptables-save/restore calls happen. // The only other iptables rules are those that are setup in iptablesInit() // assumes proxier.mu is held func (proxier *Proxier) syncProxyRules() { start := time.Now() defer func() { glog.V(4).Infof("syncProxyRules took %v", time.Since(start)) }() // don't sync rules till we've received services and endpoints if !proxier.haveReceivedEndpointsUpdate || !proxier.haveReceivedServiceUpdate { glog.V(2).Info("Not syncing iptables until Services and Endpoints have been received from master") return } glog.V(3).Infof("Syncing iptables rules") // Create and link the kube services chain. { tablesNeedServicesChain := []utiliptables.Table{utiliptables.TableFilter, utiliptables.TableNAT} for _, table := range tablesNeedServicesChain { if _, err := proxier.iptables.EnsureChain(table, kubeServicesChain); err != nil { glog.Errorf("Failed to ensure that %s chain %s exists: %v", table, kubeServicesChain, err) return } } tableChainsNeedJumpServices := []struct { table utiliptables.Table chain utiliptables.Chain }{ {utiliptables.TableFilter, utiliptables.ChainOutput}, {utiliptables.TableNAT, utiliptables.ChainOutput}, {utiliptables.TableNAT, utiliptables.ChainPrerouting}, } comment := "kubernetes service portals" args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeServicesChain)} for _, tc := range tableChainsNeedJumpServices { if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, tc.table, tc.chain, args...); err != nil { glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeServicesChain, err) return } } } // Create and link the kube postrouting chain. 
{ if _, err := proxier.iptables.EnsureChain(utiliptables.TableNAT, kubePostroutingChain); err != nil { glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubePostroutingChain, err) return } comment := "kubernetes postrouting rules" args := []string{"-m", "comment", "--comment", comment, "-j", string(kubePostroutingChain)} if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil { glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, kubePostroutingChain, err) return } } // Get iptables-save output so we can check for existing chains and rules. // This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore existingFilterChains := make(map[utiliptables.Chain]string) iptablesSaveRaw, err := proxier.iptables.Save(utiliptables.TableFilter) if err != nil { // if we failed to get any rules glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingFilterChains = getChainLines(utiliptables.TableFilter, iptablesSaveRaw) } existingNATChains := make(map[utiliptables.Chain]string) iptablesSaveRaw, err = proxier.iptables.Save(utiliptables.TableNAT) if err != nil { // if we failed to get any rules glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err) } else { // otherwise parse the output existingNATChains = getChainLines(utiliptables.TableNAT, iptablesSaveRaw) } filterChains := bytes.NewBuffer(nil) filterRules := bytes.NewBuffer(nil) natChains := bytes.NewBuffer(nil) natRules := bytes.NewBuffer(nil) // Write table headers. writeLine(filterChains, "*filter") writeLine(natChains, "*nat") // Make sure we keep stats for the top-level chains, if they existed // (which most should have because we created them above). if chain, ok := existingFilterChains[kubeServicesChain]; ok { writeLine(filterChains, chain) } else { writeLine(filterChains, makeChainLine(kubeServicesChain)) } if chain, ok := existingNATChains[kubeServicesChain]; ok { writeLine(natChains, chain) } else { writeLine(natChains, makeChainLine(kubeServicesChain)) } if chain, ok := existingNATChains[kubeNodePortsChain]; ok { writeLine(natChains, chain) } else { writeLine(natChains, makeChainLine(kubeNodePortsChain)) } if chain, ok := existingNATChains[kubePostroutingChain]; ok { writeLine(natChains, chain) } else { writeLine(natChains, makeChainLine(kubePostroutingChain)) } if chain, ok := existingNATChains[kubeMarkMasqChain]; ok { writeLine(natChains, chain) } else { writeLine(natChains, makeChainLine(kubeMarkMasqChain)) } // Install the kubernetes-specific postrouting rules. We use a whole chain for // this so that it is easier to flush and change, for example if the mark // value should ever change. writeLine(natRules, []string{ "-A", string(kubePostroutingChain), "-m", "comment", "--comment", `"kubernetes service traffic requiring SNAT"`, "-m", "mark", "--mark", proxier.masqueradeMark, "-j", "MASQUERADE", }...) // Install the kubernetes-specific masquerade mark rule. We use a whole chain for // this so that it is easier to flush and change, for example if the mark // value should ever change. writeLine(natRules, []string{ "-A", string(kubeMarkMasqChain), "-j", "MARK", "--set-xmark", proxier.masqueradeMark, }...) // Accumulate NAT chains to keep. 
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set // Accumulate new local ports that we have opened. newLocalPorts := map[localPort]closeable{} // Build rules for each service. for svcName, svcInfo := range proxier.serviceMap { protocol := strings.ToLower(string(svcInfo.protocol)) // Create the per-service chain, retaining counters if possible. svcChain := servicePortChainName(svcName, protocol) if chain, ok := existingNATChains[svcChain]; ok { writeLine(natChains, chain) } else { writeLine(natChains, makeChainLine(svcChain)) } activeNATChains[svcChain] = true // Capture the clusterIP. args := []string{ "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcName.String()), "-m", protocol, "-p", protocol, "-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()), "--dport", fmt.Sprintf("%d", svcInfo.port), } if proxier.masqueradeAll { writeLine(natRules, append(args, "-j", string(kubeMarkMasqChain))...) } writeLine(natRules, append(args, "-j", string(svcChain))...) // Capture externalIPs. for _, externalIP := range svcInfo.externalIPs { // If the "external" IP happens to be an IP that is local to this // machine, hold the local port open so no other process can open it // (because the socket might open but it would never work). if local, err := isLocalIP(externalIP); err != nil { glog.Errorf("can't determine if IP is local, assuming not: %v", err) } else if local { lp := localPort{ desc: "externalIP for " + svcName.String(), ip: externalIP, port: svcInfo.port, protocol: protocol, } if proxier.portsMap[lp] != nil { newLocalPorts[lp] = proxier.portsMap[lp] } else { socket, err := openLocalPort(&lp) if err != nil { glog.Errorf("can't open %s, skipping this externalIP: %v", lp.String(), err) continue } newLocalPorts[lp] = socket } } // We're holding the port, so it's OK to install iptables rules. args := []string{ "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcName.String()), "-m", protocol, "-p", protocol, "-d", fmt.Sprintf("%s/32", externalIP), "--dport", fmt.Sprintf("%d", svcInfo.port), } // We have to SNAT packets to external IPs. writeLine(natRules, append(args, "-j", string(kubeMarkMasqChain))...) // Allow traffic for external IPs that does not come from a bridge (i.e. not from a container) // nor from a local process to be forwarded to the service. // This rule roughly translates to "all traffic from off-machine". // This is imperfect in the face of network plugins that might not use a bridge, but we can revisit that later. externalTrafficOnlyArgs := append(args, "-m", "physdev", "!", "--physdev-is-in", "-m", "addrtype", "!", "--src-type", "LOCAL") writeLine(natRules, append(externalTrafficOnlyArgs, "-j", string(svcChain))...) dstLocalOnlyArgs := append(args, "-m", "addrtype", "--dst-type", "LOCAL") // Allow traffic bound for external IPs that happen to be recognized as local IPs to stay local. // This covers cases like GCE load-balancers which get added to the local routing table. writeLine(natRules, append(dstLocalOnlyArgs, "-j", string(svcChain))...) } // Capture load-balancer ingress. for _, ingress := range svcInfo.loadBalancerStatus.Ingress { if ingress.IP != "" { args := []string{ "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s loadbalancer IP"`, svcName.String()), "-m", protocol, "-p", protocol, "-d", fmt.Sprintf("%s/32", ingress.IP), "--dport", fmt.Sprintf("%d", svcInfo.port), } // We have to SNAT packets from external IPs. 
writeLine(natRules, append(args, "-j", string(kubeMarkMasqChain))...) writeLine(natRules, append(args, "-j", string(svcChain))...) } } // Capture nodeports. If we had more than 2 rules it might be // worthwhile to make a new per-service chain for nodeport rules, but // with just 2 rules it ends up being a waste and a cognitive burden. if svcInfo.nodePort != 0 { // Hold the local port open so no other process can open it // (because the socket might open but it would never work). lp := localPort{ desc: "nodePort for " + svcName.String(), ip: "", port: svcInfo.nodePort, protocol: protocol, } if proxier.portsMap[lp] != nil { newLocalPorts[lp] = proxier.portsMap[lp] } else { socket, err := openLocalPort(&lp) if err != nil { glog.Errorf("can't open %s, skipping this nodePort: %v", lp.String(), err) continue } newLocalPorts[lp] = socket } // We're holding the port, so it's OK to install iptables rules. args := []string{ "-A", string(kubeNodePortsChain), "-m", "comment", "--comment", svcName.String(), "-m", protocol, "-p", protocol, "--dport", fmt.Sprintf("%d", svcInfo.nodePort), } // Nodeports need SNAT. writeLine(natRules, append(args, "-j", string(kubeMarkMasqChain))...) // Jump to the service chain. writeLine(natRules, append(args, "-j", string(svcChain))...) } // If the service has no endpoints then reject packets. if len(proxier.endpointsMap[svcName]) == 0 { writeLine(filterRules, "-A", string(kubeServicesChain), "-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcName.String()), "-m", protocol, "-p", protocol, "-d", fmt.Sprintf("%s/32", svcInfo.clusterIP.String()), "--dport", fmt.Sprintf("%d", svcInfo.port), "-j", "REJECT", ) continue } // Generate the per-endpoint chains. We do this in multiple passes so we // can group rules together. endpoints := make([]string, 0) endpointChains := make([]utiliptables.Chain, 0) for _, ep := range proxier.endpointsMap[svcName] { endpoints = append(endpoints, ep) endpointChain := servicePortEndpointChainName(svcName, protocol, ep) endpointChains = append(endpointChains, endpointChain) // Create the endpoint chain, retaining counters if possible. if chain, ok := existingNATChains[utiliptables.Chain(endpointChain)]; ok { writeLine(natChains, chain) } else { writeLine(natChains, makeChainLine(endpointChain)) } activeNATChains[endpointChain] = true } // First write session affinity rules, if applicable. if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { for _, endpointChain := range endpointChains { writeLine(natRules, "-A", string(svcChain), "-m", "comment", "--comment", svcName.String(), "-m", "recent", "--name", string(endpointChain), "--rcheck", "--seconds", fmt.Sprintf("%d", svcInfo.stickyMaxAgeSeconds), "--reap", "-j", string(endpointChain)) } } // Now write loadbalancing & DNAT rules. n := len(endpointChains) for i, endpointChain := range endpointChains { // Balancing rules in the per-service chain. args := []string{ "-A", string(svcChain), "-m", "comment", "--comment", svcName.String(), } if i < (n - 1) { // Each rule is a probabilistic match. args = append(args, "-m", "statistic", "--mode", "random", "--probability", fmt.Sprintf("%0.5f", 1.0/float64(n-i))) } // The final (or only if n == 1) rule is a guaranteed match. args = append(args, "-j", string(endpointChain)) writeLine(natRules, args...) // Rules in the per-endpoint chain. args = []string{ "-A", string(endpointChain), "-m", "comment", "--comment", svcName.String(), } // Handle traffic that loops back to the originator with SNAT. 
// Technically we only need to do this if the endpoint is on this // host, but we don't have that information, so we just do this for // all endpoints. // TODO: if we grow logic to get this node's pod CIDR, we can use it. writeLine(natRules, append(args, "-s", fmt.Sprintf("%s/32", strings.Split(endpoints[i], ":")[0]), "-j", string(kubeMarkMasqChain))...) // Update client-affinity lists. if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { args = append(args, "-m", "recent", "--name", string(endpointChain), "--set") } // DNAT to final destination. args = append(args, "-m", protocol, "-p", protocol, "-j", "DNAT", "--to-destination", endpoints[i]) writeLine(natRules, args...) } } // Delete chains no longer in use. for chain := range existingNATChains { if !activeNATChains[chain] { chainString := string(chain) if !strings.HasPrefix(chainString, "KUBE-SVC-") && !strings.HasPrefix(chainString, "KUBE-SEP-") { // Ignore chains that aren't ours. continue } // We must (as per iptables) write a chain-line for it, which has // the nice effect of flushing the chain. Then we can remove the // chain. writeLine(natChains, existingNATChains[chain]) writeLine(natRules, "-X", chainString) } } // Finally, tail-call to the nodeports chain. This needs to be after all // other service portal rules. writeLine(natRules, "-A", string(kubeServicesChain), "-m", "comment", "--comment", `"kubernetes service nodeports; NOTE: this must be the last rule in this chain"`, "-m", "addrtype", "--dst-type", "LOCAL", "-j", string(kubeNodePortsChain)) // Write the end-of-table markers. writeLine(filterRules, "COMMIT") writeLine(natRules, "COMMIT") // Sync rules. // NOTE: NoFlushTables is used so we don't flush non-kubernetes chains in the table. filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) natLines := append(natChains.Bytes(), natRules.Bytes()...) lines := append(filterLines, natLines...) glog.V(3).Infof("Restoring iptables rules: %s", lines) err = proxier.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters) if err != nil { glog.Errorf("Failed to execute iptables-restore: %v", err) // Revert new local ports. for k, v := range newLocalPorts { glog.Errorf("Closing local port %s", k.String()) v.Close() } return } // Close old local ports and save new ones. for k, v := range proxier.portsMap { if newLocalPorts[k] == nil { v.Close() } } proxier.portsMap = newLocalPorts // Clean up the older SNAT rule which was directly in POSTROUTING. // TODO(thockin): Remove this for v1.3 or v1.4. args := []string{ "-m", "comment", "--comment", "kubernetes service traffic requiring SNAT", "-m", "mark", "--mark", oldIptablesMasqueradeMark, "-j", "MASQUERADE", } if err := proxier.iptables.DeleteRule(utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil { if !utiliptables.IsNotFoundError(err) { glog.Errorf("Error removing old-style SNAT rule: %v", err) } } } // Join all words with spaces, terminate with newline and write to buf. func writeLine(buf *bytes.Buffer, words ...string) { buf.WriteString(strings.Join(words, " ") + "\n") } // return an iptables-save/restore formatted chain line given a Chain func makeChainLine(chain utiliptables.Chain) string { return fmt.Sprintf(":%s - [0:0]", chain) } // getChainLines parses a table's iptables-save data to find chains in the table. // It returns a map of iptables.Chain to string where the string is the chain line from the save (with counters etc). 
func getChainLines(table utiliptables.Table, save []byte) map[utiliptables.Chain]string { chainsMap := make(map[utiliptables.Chain]string) tablePrefix := "*" + string(table) readIndex := 0 // find beginning of table for readIndex < len(save) { line, n := readLine(readIndex, save) readIndex = n if strings.HasPrefix(line, tablePrefix) { break } } // parse table lines for readIndex < len(save) { line, n := readLine(readIndex, save) readIndex = n if len(line) == 0 { continue } if strings.HasPrefix(line, "COMMIT") || strings.HasPrefix(line, "*") { break } else if strings.HasPrefix(line, "#") { continue } else if strings.HasPrefix(line, ":") && len(line) > 1 { chain := utiliptables.Chain(strings.SplitN(line[1:], " ", 2)[0]) chainsMap[chain] = line } } return chainsMap } func readLine(readIndex int, byteArray []byte) (string, int) { currentReadIndex := readIndex // consume left spaces for currentReadIndex < len(byteArray) { if byteArray[currentReadIndex] == ' ' { currentReadIndex++ } else { break } } // leftTrimIndex stores the left index of the line after the line is left-trimmed leftTrimIndex := currentReadIndex // rightTrimIndex stores the right index of the line after the line is right-trimmed // it is set to -1 since the correct value has not yet been determined. rightTrimIndex := -1 for ; currentReadIndex < len(byteArray); currentReadIndex++ { if byteArray[currentReadIndex] == ' ' { // set rightTrimIndex if rightTrimIndex == -1 { rightTrimIndex = currentReadIndex } } else if (byteArray[currentReadIndex] == '\n') || (currentReadIndex == (len(byteArray) - 1)) { // end of line or byte buffer is reached if currentReadIndex <= leftTrimIndex { return "", currentReadIndex + 1 } // set the rightTrimIndex if rightTrimIndex == -1 { rightTrimIndex = currentReadIndex if currentReadIndex == (len(byteArray)-1) && (byteArray[currentReadIndex] != '\n') { // ensure that the last character is part of the returned string, // unless the last character is '\n' rightTrimIndex = currentReadIndex + 1 } } return string(byteArray[leftTrimIndex:rightTrimIndex]), currentReadIndex + 1 } else { // unset rightTrimIndex rightTrimIndex = -1 } } return "", currentReadIndex } func isLocalIP(ip string) (bool, error) { addrs, err := net.InterfaceAddrs() if err != nil { return false, err } for i := range addrs { intf, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return false, err } if net.ParseIP(ip).Equal(intf) { return true, nil } } return false, nil } func openLocalPort(lp *localPort) (closeable, error) { // For ports on node IPs, open the actual port and hold it, even though we // use iptables to redirect traffic. // This ensures a) that it's safe to use that port and b) that (a) stays // true. The risk is that some process on the node (e.g. sshd or kubelet) // is using a port and we give that same port out to a Service. That would // be bad because iptables would silently claim the traffic but the process // would never know. // NOTE: We should not need to have a real listen()ing socket - bind() // should be enough, but I can't figure out a way to e2e test without // it. Tools like 'ss' and 'netstat' do not show sockets that are // bind()ed but not listen()ed, and at least the default debian netcat // has no way to avoid about 10 seconds of retries. 
var socket closeable switch lp.protocol { case "tcp": listener, err := net.Listen("tcp", net.JoinHostPort(lp.ip, strconv.Itoa(lp.port))) if err != nil { return nil, err } socket = listener case "udp": addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(lp.ip, strconv.Itoa(lp.port))) if err != nil { return nil, err } conn, err := net.ListenUDP("udp", addr) if err != nil { return nil, err } socket = conn default: return nil, fmt.Errorf("unknown protocol %q", lp.protocol) } glog.V(2).Infof("Opened local port %s", lp.String()) return socket, nil }
pkg/proxy/iptables/proxier.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.22576287388801575, 0.002446807688102126, 0.0001629889156902209, 0.00017207172641064972, 0.021329166367650032 ]
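A standalone sketch of the chain-naming scheme used by servicePortChainName in the proxier.go record above: sha256 of the service string plus protocol, base32-encoded and truncated to 16 characters under a fixed prefix. The example service string is hypothetical.

package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
)

// chainName mirrors the naming scheme from proxier.go: hash the service
// string plus protocol, base32-encode, and keep 16 characters so the chain
// name stays within iptables' 28-character limit.
func chainName(prefix, servicePortName, protocol string) string {
	hash := sha256.Sum256([]byte(servicePortName + protocol))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	return prefix + encoded[:16]
}

func main() {
	// "default/redis-master:redis" is a hypothetical ServicePortName string.
	fmt.Println(chainName("KUBE-SVC-", "default/redis-master:redis", "tcp"))
}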
{ "id": 5, "code_window": [ "\t\"encoding/base32\"\n", "\t\"fmt\"\n", "\t\"net\"\n", "\t\"reflect\"\n", "\t\"strconv\"\n", "\t\"strings\"\n", "\t\"sync\"\n", "\t\"time\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"os\"\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "add", "edit_start_line_idx": 28 }
"\"\"\""
Godeps/_workspace/src/github.com/jmespath/go-jmespath/fuzz/corpus/expr-27
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00017579988343641162, 0.00017579988343641162, 0.00017579988343641162, 0.00017579988343641162, 0 ]
{ "id": 5, "code_window": [ "\t\"encoding/base32\"\n", "\t\"fmt\"\n", "\t\"net\"\n", "\t\"reflect\"\n", "\t\"strconv\"\n", "\t\"strings\"\n", "\t\"sync\"\n", "\t\"time\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"os\"\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "add", "edit_start_line_idx": 28 }
package firewalls

import "fmt"

func err(str string) error {
	return fmt.Errorf("%s", str)
}

var (
	errPolicyRequired = err("A policy ID is required")
)
Godeps/_workspace/src/github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00023352805874310434, 0.00020570357446558774, 0.0001778790756361559, 0.00020570357446558774, 0.000027824491553474218 ]
{ "id": 5, "code_window": [ "\t\"encoding/base32\"\n", "\t\"fmt\"\n", "\t\"net\"\n", "\t\"reflect\"\n", "\t\"strconv\"\n", "\t\"strings\"\n", "\t\"sync\"\n", "\t\"time\"\n" ], "labels": [ "keep", "keep", "add", "keep", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\"os\"\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "add", "edit_start_line_idx": 28 }
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package spdy

import (
	"fmt"
	"net/http"
	"strings"

	"k8s.io/kubernetes/pkg/util/httpstream"
	"k8s.io/kubernetes/pkg/util/runtime"
)

const HeaderSpdy31 = "SPDY/3.1"

// responseUpgrader knows how to upgrade HTTP responses. It
// implements the httpstream.ResponseUpgrader interface.
type responseUpgrader struct {
}

// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
// capable of upgrading HTTP responses using SPDY/3.1 via the
// spdystream package.
func NewResponseUpgrader() httpstream.ResponseUpgrader {
	return responseUpgrader{}
}

// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
// streams. newStreamHandler will be called synchronously whenever the
// other end of the upgraded connection creates a new stream.
func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
	connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
	upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
	if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
		w.WriteHeader(http.StatusBadRequest)
		fmt.Fprintf(w, "unable to upgrade: missing upgrade headers in request: %#v", req.Header)
		return nil
	}

	hijacker, ok := w.(http.Hijacker)
	if !ok {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "unable to upgrade: unable to hijack response")
		return nil
	}

	w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
	w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
	w.WriteHeader(http.StatusSwitchingProtocols)

	conn, _, err := hijacker.Hijack()
	if err != nil {
		runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
		return nil
	}

	spdyConn, err := NewServerConnection(conn, newStreamHandler)
	if err != nil {
		runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
		return nil
	}

	return spdyConn
}
pkg/util/httpstream/spdy/upgrade.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.005024791229516268, 0.001252002315595746, 0.0001677573163760826, 0.00017206789925694466, 0.00189149787183851 ]
{ "id": 6, "code_window": [ "\tif err := utilsysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {\n", "\t\treturn nil, fmt.Errorf(\"can't set sysctl %s: %v\", sysctlRouteLocalnet, err)\n", "\t}\n", "\n", "\t// Load the module. It's OK if this fails (e.g. the module is not present)\n", "\t// because we'll catch the error on the sysctl, which is what we actually\n", "\t// care about.\n", "\texec.Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n", "\t// Generate the masquerade mark to use for SNAT rules.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers\n", "\t// are connected to a Linux bridge (but not SDN bridges). Until most\n", "\t// plugins handle this, log when config is missing\n", "\twarnBrNetfilter := false\n", "\tif _, err := os.Stat(\"/sys/module/br_netfilter\"); os.IsNotExist(err) {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif val, err := utilsysctl.GetSysctl(sysctlBridgeCallIptables); err == nil && val != 1 {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif warnBrNetfilter {\n", "\t\tglog.Infof(\"missing br-netfilter module or unset br-nf-call-iptables; proxy may not work as intended\")\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "replace", "edit_start_line_idx": 192 }
/* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package network import ( "fmt" "net" "strings" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "github.com/golang/glog" "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" utilerrors "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/validation" ) const DefaultPluginName = "kubernetes.io/no-op" // Called when the node's Pod CIDR is known when using the // controller manager's --allocate-node-cidrs=true option const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change" const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr" // Plugin is an interface to network plugins for the kubelet type NetworkPlugin interface { // Init initializes the plugin. This will be called exactly once // before any other methods are called. Init(host Host) error // Called on various events like: // NET_PLUGIN_EVENT_POD_CIDR_CHANGE Event(name string, details map[string]interface{}) // Name returns the plugin's name. This will be used when searching // for a plugin by name, e.g. Name() string // SetUpPod is the method called after the infra container of // the pod has been created but before the other containers of the // pod are launched. SetUpPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // TearDownPod is the method called before a pod's infra container will be deleted TearDownPod(namespace string, name string, podInfraContainerID kubecontainer.DockerID) error // Status is the method called to obtain the ipv4 or ipv6 addresses of the container Status(namespace string, name string, podInfraContainerID kubecontainer.DockerID) (*PodNetworkStatus, error) } // PodNetworkStatus stores the network status of a pod (currently just the primary IP address) // This struct represents version "v1beta1" type PodNetworkStatus struct { unversioned.TypeMeta `json:",inline"` // IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that - // - kube expects to be reachable across the cluster // - service endpoints are constructed with // - will be reported in the PodStatus.PodIP field (will override the IP reported by docker) IP net.IP `json:"ip" description:"Primary IP address of the pod"` } // Host is an interface that plugins can use to access the kubelet. type Host interface { // Get the pod structure by its name, namespace GetPodByName(namespace, name string) (*api.Pod, bool) // GetKubeClient returns a client interface GetKubeClient() clientset.Interface // GetContainerRuntime returns the container runtime that implements the containers (e.g. docker/rkt) GetRuntime() kubecontainer.Runtime } // InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names. 
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host) (NetworkPlugin, error) { if networkPluginName == "" { // default to the no_op plugin plug := &noopNetworkPlugin{} return plug, nil } pluginMap := map[string]NetworkPlugin{} allErrs := []error{} for _, plugin := range plugins { name := plugin.Name() if !validation.IsQualifiedName(name) { allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %#v", plugin)) continue } if _, found := pluginMap[name]; found { allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name)) continue } pluginMap[name] = plugin } chosenPlugin := pluginMap[networkPluginName] if chosenPlugin != nil { err := chosenPlugin.Init(host) if err != nil { allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err)) } else { glog.V(1).Infof("Loaded network plugin %q", networkPluginName) } } else { allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName)) } return chosenPlugin, utilerrors.NewAggregate(allErrs) } func UnescapePluginName(in string) string { return strings.Replace(in, "~", "/", -1) } type noopNetworkPlugin struct { } func (plugin *noopNetworkPlugin) Init(host Host) error { return nil } func (plugin *noopNetworkPlugin) Event(name string, details map[string]interface{}) { } func (plugin *noopNetworkPlugin) Name() string { return DefaultPluginName } func (plugin *noopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.DockerID) error { return nil } func (plugin *noopNetworkPlugin) Status(namespace string, name string, id kubecontainer.DockerID) (*PodNetworkStatus, error) { return nil, nil }
pkg/kubelet/network/plugins.go
1
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.0002491507912054658, 0.00017565253074280918, 0.0001582236582180485, 0.00016943772789090872, 0.000021240881324047223 ]
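The id-6 hunk attached to the records above replaces the modprobe/SetSysctl block with a passive check that only logs when the bridge-netfilter configuration is missing. A minimal, self-contained sketch of that detection follows; it reads the sysctl via plain file I/O instead of the utilsysctl helper, so readSysctl and the proc path used here are illustrative, not part of the original patch.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// readSysctl reads an integer sysctl value from /proc/sys.
func readSysctl(path string) (int, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return -1, err
	}
	return strconv.Atoi(strings.TrimSpace(string(data)))
}

func main() {
	warn := false
	// The module exposes /sys/module/br_netfilter once it is loaded.
	if _, err := os.Stat("/sys/module/br_netfilter"); os.IsNotExist(err) {
		warn = true
	}
	// bridge-nf-call-iptables must be 1 for bridged traffic to traverse iptables.
	if val, err := readSysctl("/proc/sys/net/bridge/bridge-nf-call-iptables"); err == nil && val != 1 {
		warn = true
	}
	if warn {
		fmt.Println("missing br-netfilter module or unset br-nf-call-iptables; proxy may not work as intended")
	}
}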
{ "id": 6, "code_window": [ "\tif err := utilsysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {\n", "\t\treturn nil, fmt.Errorf(\"can't set sysctl %s: %v\", sysctlRouteLocalnet, err)\n", "\t}\n", "\n", "\t// Load the module. It's OK if this fails (e.g. the module is not present)\n", "\t// because we'll catch the error on the sysctl, which is what we actually\n", "\t// care about.\n", "\texec.Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n", "\t// Generate the masquerade mark to use for SNAT rules.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers\n", "\t// are connected to a Linux bridge (but not SDN bridges). Until most\n", "\t// plugins handle this, log when config is missing\n", "\twarnBrNetfilter := false\n", "\tif _, err := os.Stat(\"/sys/module/br_netfilter\"); os.IsNotExist(err) {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif val, err := utilsysctl.GetSysctl(sysctlBridgeCallIptables); err == nil && val != 1 {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif warnBrNetfilter {\n", "\t\tglog.Infof(\"missing br-netfilter module or unset br-nf-call-iptables; proxy may not work as intended\")\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "replace", "edit_start_line_idx": 192 }
/* Gomega is the Ginkgo BDD-style testing framework's preferred matcher library. The godoc documentation describes Gomega's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/gomega/ Gomega on Github: http://github.com/onsi/gomega Learn more about Ginkgo online: http://onsi.github.io/ginkgo Ginkgo on Github: http://github.com/onsi/ginkgo Gomega is MIT-Licensed */ package gomega import ( "fmt" "reflect" "time" "github.com/onsi/gomega/internal/assertion" "github.com/onsi/gomega/internal/asyncassertion" "github.com/onsi/gomega/internal/testingtsupport" "github.com/onsi/gomega/types" ) const GOMEGA_VERSION = "1.0" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). Alternatively, you may have forgotten to register a fail handler with RegisterFailHandler() or RegisterTestingT(). ` var globalFailHandler types.GomegaFailHandler var defaultEventuallyTimeout = time.Second var defaultEventuallyPollingInterval = 10 * time.Millisecond var defaultConsistentlyDuration = 100 * time.Millisecond var defaultConsistentlyPollingInterval = 10 * time.Millisecond //RegisterFailHandler connects Ginkgo to Gomega. When a matcher fails //the fail handler passed into RegisterFailHandler is called. func RegisterFailHandler(handler types.GomegaFailHandler) { globalFailHandler = handler } //RegisterTestingT connects Gomega to Golang's XUnit style //Testing.T tests. You'll need to call this at the top of each XUnit style test: // // func TestFarmHasCow(t *testing.T) { // RegisterTestingT(t) // // f := farm.New([]string{"Cow", "Horse"}) // Expect(f.HasCow()).To(BeTrue(), "Farm should have cow") // } // // Note that this *testing.T is registered *globally* by Gomega (this is why you don't have to // pass `t` down to the matcher itself). This means that you cannot run the XUnit style tests // in parallel as the global fail handler cannot point to more than one testing.T at a time. // // (As an aside: Ginkgo gets around this limitation by running parallel tests in different *processes*). func RegisterTestingT(t types.GomegaTestingT) { RegisterFailHandler(testingtsupport.BuildTestingTGomegaFailHandler(t)) } //InterceptGomegaHandlers runs a given callback and returns an array of //failure messages generated by any Gomega assertions within the callback. // //This is accomplished by temporarily replacing the *global* fail handler //with a fail handler that simply annotates failures. The original fail handler //is reset when InterceptGomegaFailures returns. // //This is most useful when testing custom matchers, but can also be used to check //on a value using a Gomega assertion without causing a test failure. func InterceptGomegaFailures(f func()) []string { originalHandler := globalFailHandler failures := []string{} RegisterFailHandler(func(message string, callerSkip ...int) { failures = append(failures, message) }) f() RegisterFailHandler(originalHandler) return failures } //Ω wraps an actual value allowing assertions to be made on it: // Ω("foo").Should(Equal("foo")) // //If Ω is passed more than one argument it will pass the *first* argument to the matcher. //All subsequent arguments will be required to be nil/zero. // //This is convenient if you want to make an assertion on a method/function that returns //a value and an error - a common patter in Go. 
// //For example, given a function with signature: // func MyAmazingThing() (int, error) // //Then: // Ω(MyAmazingThing()).Should(Equal(3)) //Will succeed only if `MyAmazingThing()` returns `(3, nil)` // //Ω and Expect are identical func Ω(actual interface{}, extra ...interface{}) GomegaAssertion { return ExpectWithOffset(0, actual, extra...) } //Expect wraps an actual value allowing assertions to be made on it: // Expect("foo").To(Equal("foo")) // //If Expect is passed more than one argument it will pass the *first* argument to the matcher. //All subsequent arguments will be required to be nil/zero. // //This is convenient if you want to make an assertion on a method/function that returns //a value and an error - a common patter in Go. // //For example, given a function with signature: // func MyAmazingThing() (int, error) // //Then: // Expect(MyAmazingThing()).Should(Equal(3)) //Will succeed only if `MyAmazingThing()` returns `(3, nil)` // //Expect and Ω are identical func Expect(actual interface{}, extra ...interface{}) GomegaAssertion { return ExpectWithOffset(0, actual, extra...) } //ExpectWithOffset wraps an actual value allowing assertions to be made on it: // ExpectWithOffset(1, "foo").To(Equal("foo")) // //Unlike `Expect` and `Ω`, `ExpectWithOffset` takes an additional integer argument //this is used to modify the call-stack offset when computing line numbers. // //This is most useful in helper functions that make assertions. If you want Gomega's //error message to refer to the calling line in the test (as opposed to the line in the helper function) //set the first argument of `ExpectWithOffset` appropriately. func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) GomegaAssertion { if globalFailHandler == nil { panic(nilFailHandlerPanic) } return assertion.New(actual, globalFailHandler, offset, extra...) } //Eventually wraps an actual value allowing assertions to be made on it. //The assertion is tried periodically until it passes or a timeout occurs. // //Both the timeout and polling interval are configurable as optional arguments: //The first optional argument is the timeout //The second optional argument is the polling interval // //Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the //last case they are interpreted as seconds. // //If Eventually is passed an actual that is a function taking no arguments and returning at least one value, //then Eventually will call the function periodically and try the matcher against the function's first return value. // //Example: // // Eventually(func() int { // return thingImPolling.Count() // }).Should(BeNumerically(">=", 17)) // //Note that this example could be rewritten: // // Eventually(thingImPolling.Count).Should(BeNumerically(">=", 17)) // //If the function returns more than one value, then Eventually will pass the first value to the matcher and //assert that all other values are nil/zero. //This allows you to pass Eventually a function that returns a value and an error - a common pattern in Go. // //For example, consider a method that returns a value and an error: // func FetchFromDB() (string, error) // //Then // Eventually(FetchFromDB).Should(Equal("hasselhoff")) // //Will pass only if the the returned error is nil and the returned string passes the matcher. 
// //Eventually's default timeout is 1 second, and its default polling interval is 10ms func Eventually(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { return EventuallyWithOffset(0, actual, intervals...) } //EventuallyWithOffset operates like Eventually but takes an additional //initial argument to indicate an offset in the call stack. This is useful when building helper //functions that contain matchers. To learn more, read about `ExpectWithOffset`. func EventuallyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { if globalFailHandler == nil { panic(nilFailHandlerPanic) } timeoutInterval := defaultEventuallyTimeout pollingInterval := defaultEventuallyPollingInterval if len(intervals) > 0 { timeoutInterval = toDuration(intervals[0]) } if len(intervals) > 1 { pollingInterval = toDuration(intervals[1]) } return asyncassertion.New(asyncassertion.AsyncAssertionTypeEventually, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) } //Consistently wraps an actual value allowing assertions to be made on it. //The assertion is tried periodically and is required to pass for a period of time. // //Both the total time and polling interval are configurable as optional arguments: //The first optional argument is the duration that Consistently will run for //The second optional argument is the polling interval // //Both intervals can either be specified as time.Duration, parsable duration strings or as floats/integers. In the //last case they are interpreted as seconds. // //If Consistently is passed an actual that is a function taking no arguments and returning at least one value, //then Consistently will call the function periodically and try the matcher against the function's first return value. // //If the function returns more than one value, then Consistently will pass the first value to the matcher and //assert that all other values are nil/zero. //This allows you to pass Consistently a function that returns a value and an error - a common pattern in Go. // //Consistently is useful in cases where you want to assert that something *does not happen* over a period of tiem. //For example, you want to assert that a goroutine does *not* send data down a channel. In this case, you could: // // Consistently(channel).ShouldNot(Receive()) // //Consistently's default duration is 100ms, and its default polling interval is 10ms func Consistently(actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { return ConsistentlyWithOffset(0, actual, intervals...) } //ConsistentlyWithOffset operates like Consistnetly but takes an additional //initial argument to indicate an offset in the call stack. This is useful when building helper //functions that contain matchers. To learn more, read about `ExpectWithOffset`. func ConsistentlyWithOffset(offset int, actual interface{}, intervals ...interface{}) GomegaAsyncAssertion { if globalFailHandler == nil { panic(nilFailHandlerPanic) } timeoutInterval := defaultConsistentlyDuration pollingInterval := defaultConsistentlyPollingInterval if len(intervals) > 0 { timeoutInterval = toDuration(intervals[0]) } if len(intervals) > 1 { pollingInterval = toDuration(intervals[1]) } return asyncassertion.New(asyncassertion.AsyncAssertionTypeConsistently, actual, globalFailHandler, timeoutInterval, pollingInterval, offset) } //Set the default timeout duration for Eventually. Eventually will repeatedly poll your condition until it succeeds, or until this timeout elapses. 
func SetDefaultEventuallyTimeout(t time.Duration) { defaultEventuallyTimeout = t } //Set the default polling interval for Eventually. func SetDefaultEventuallyPollingInterval(t time.Duration) { defaultEventuallyPollingInterval = t } //Set the default duration for Consistently. Consistently will verify that your condition is satsified for this long. func SetDefaultConsistentlyDuration(t time.Duration) { defaultConsistentlyDuration = t } //Set the default polling interval for Consistently. func SetDefaultConsistentlyPollingInterval(t time.Duration) { defaultConsistentlyPollingInterval = t } //GomegaAsyncAssertion is returned by Eventually and Consistently and polls the actual value passed into Eventually against //the matcher passed to the Should and ShouldNot methods. // //Both Should and ShouldNot take a variadic optionalDescription argument. This is passed on to //fmt.Sprintf() and is used to annotate failure messages. This allows you to make your failure messages more //descriptive // //Both Should and ShouldNot return a boolean that is true if the assertion passed and false if it failed. // //Example: // // Eventually(myChannel).Should(Receive(), "Something should have come down the pipe.") // Consistently(myChannel).ShouldNot(Receive(), "Nothing should have come down the pipe.") type GomegaAsyncAssertion interface { Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool } //GomegaAssertion is returned by Ω and Expect and compares the actual value to the matcher //passed to the Should/ShouldNot and To/ToNot/NotTo methods. // //Typically Should/ShouldNot are used with Ω and To/ToNot/NotTo are used with Expect //though this is not enforced. // //All methods take a variadic optionalDescription argument. This is passed on to fmt.Sprintf() //and is used to annotate failure messages. // //All methods return a bool that is true if hte assertion passed and false if it failed. // //Example: // // Ω(farm.HasCow()).Should(BeTrue(), "Farm %v should have a cow", farm) type GomegaAssertion interface { Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool } //OmegaMatcher is deprecated in favor of the better-named and better-organized types.GomegaMatcher but sticks around to support existing code that uses it type OmegaMatcher types.GomegaMatcher func toDuration(input interface{}) time.Duration { duration, ok := input.(time.Duration) if ok { return duration } value := reflect.ValueOf(input) kind := reflect.TypeOf(input).Kind() if reflect.Int <= kind && kind <= reflect.Int64 { return time.Duration(value.Int()) * time.Second } else if reflect.Uint <= kind && kind <= reflect.Uint64 { return time.Duration(value.Uint()) * time.Second } else if reflect.Float32 <= kind && kind <= reflect.Float64 { return time.Duration(value.Float() * float64(time.Second)) } else if reflect.String == kind { duration, err := time.ParseDuration(value.String()) if err != nil { panic(fmt.Sprintf("%#v is not a valid parsable duration string.", input)) } return duration } panic(fmt.Sprintf("%v is not a valid interval. Must be time.Duration, parsable duration string or a number.", input)) }
Godeps/_workspace/src/github.com/onsi/gomega/gomega_dsl.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00024760322412475944, 0.00017374948947690427, 0.00016176575445570052, 0.0001729917712509632, 0.000013610575479106046 ]
{ "id": 6, "code_window": [ "\tif err := utilsysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {\n", "\t\treturn nil, fmt.Errorf(\"can't set sysctl %s: %v\", sysctlRouteLocalnet, err)\n", "\t}\n", "\n", "\t// Load the module. It's OK if this fails (e.g. the module is not present)\n", "\t// because we'll catch the error on the sysctl, which is what we actually\n", "\t// care about.\n", "\texec.Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n", "\t// Generate the masquerade mark to use for SNAT rules.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers\n", "\t// are connected to a Linux bridge (but not SDN bridges). Until most\n", "\t// plugins handle this, log when config is missing\n", "\twarnBrNetfilter := false\n", "\tif _, err := os.Stat(\"/sys/module/br_netfilter\"); os.IsNotExist(err) {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif val, err := utilsysctl.GetSysctl(sysctlBridgeCallIptables); err == nil && val != 1 {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif warnBrNetfilter {\n", "\t\tglog.Infof(\"missing br-netfilter module or unset br-nf-call-iptables; proxy may not work as intended\")\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "replace", "edit_start_line_idx": 192 }
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package invoke

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"

	"github.com/appc/cni/pkg/types"
)

func pluginErr(err error, output []byte) error {
	if _, ok := err.(*exec.ExitError); ok {
		emsg := types.Error{}
		if perr := json.Unmarshal(output, &emsg); perr != nil {
			return fmt.Errorf("netplugin failed but error parsing its diagnostic message %q: %v", string(output), perr)
		}
		details := ""
		if emsg.Details != "" {
			details = fmt.Sprintf("; %v", emsg.Details)
		}
		return fmt.Errorf("%v%v", emsg.Msg, details)
	}

	return err
}

func ExecPluginWithResult(pluginPath string, netconf []byte, args CNIArgs) (*types.Result, error) {
	stdoutBytes, err := execPlugin(pluginPath, netconf, args)
	if err != nil {
		return nil, err
	}

	res := &types.Result{}
	err = json.Unmarshal(stdoutBytes, res)
	return res, err
}

func ExecPluginWithoutResult(pluginPath string, netconf []byte, args CNIArgs) error {
	_, err := execPlugin(pluginPath, netconf, args)
	return err
}

func execPlugin(pluginPath string, netconf []byte, args CNIArgs) ([]byte, error) {
	if pluginPath == "" {
		return nil, fmt.Errorf("could not find %q plugin", filepath.Base(pluginPath))
	}

	stdout := &bytes.Buffer{}

	c := exec.Cmd{
		Env:    args.AsEnv(),
		Path:   pluginPath,
		Args:   []string{pluginPath},
		Stdin:  bytes.NewBuffer(netconf),
		Stdout: stdout,
		Stderr: os.Stderr,
	}
	if err := c.Run(); err != nil {
		return nil, pluginErr(err, stdout.Bytes())
	}

	return stdout.Bytes(), nil
}
Godeps/_workspace/src/github.com/appc/cni/pkg/invoke/exec.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.001452909898944199, 0.00032795139122754335, 0.00016341936134267598, 0.00016906495147850364, 0.00040027700015343726 ]
{ "id": 6, "code_window": [ "\tif err := utilsysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil {\n", "\t\treturn nil, fmt.Errorf(\"can't set sysctl %s: %v\", sysctlRouteLocalnet, err)\n", "\t}\n", "\n", "\t// Load the module. It's OK if this fails (e.g. the module is not present)\n", "\t// because we'll catch the error on the sysctl, which is what we actually\n", "\t// care about.\n", "\texec.Command(\"modprobe\", \"br-netfilter\").CombinedOutput()\n", "\tif err := utilsysctl.SetSysctl(sysctlBridgeCallIptables, 1); err != nil {\n", "\t\tglog.Warningf(\"can't set sysctl %s: %v\", sysctlBridgeCallIptables, err)\n", "\t}\n", "\n", "\t// Generate the masquerade mark to use for SNAT rules.\n" ], "labels": [ "keep", "keep", "keep", "keep", "replace", "replace", "replace", "replace", "replace", "replace", "keep", "keep", "keep" ], "after_edit": [ "\t// Proxy needs br_netfilter and bridge-nf-call-iptables=1 when containers\n", "\t// are connected to a Linux bridge (but not SDN bridges). Until most\n", "\t// plugins handle this, log when config is missing\n", "\twarnBrNetfilter := false\n", "\tif _, err := os.Stat(\"/sys/module/br_netfilter\"); os.IsNotExist(err) {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif val, err := utilsysctl.GetSysctl(sysctlBridgeCallIptables); err == nil && val != 1 {\n", "\t\twarnBrNetfilter = true\n", "\t}\n", "\tif warnBrNetfilter {\n", "\t\tglog.Infof(\"missing br-netfilter module or unset br-nf-call-iptables; proxy may not work as intended\")\n" ], "file_path": "pkg/proxy/iptables/proxier.go", "type": "replace", "edit_start_line_idx": 192 }
// flocker package allows you to easily interact with a Flocker Control Service.
package flocker
Godeps/_workspace/src/github.com/ClusterHQ/flocker-go/doc.go
0
https://github.com/kubernetes/kubernetes/commit/6248939e11a4d5b422da5ffdc7ec52a6c1ded54a
[ 0.00016766952467150986, 0.00016766952467150986, 0.00016766952467150986, 0.00016766952467150986, 0 ]
{ "id": 0, "code_window": [ "\n", "func TestAbsUrlify(t *testing.T) {\n", "\tfiles := make(map[string][]byte)\n", "\ttarget := &InMemoryTarget{files: files}\n", "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tsources := []byteSource{\n", "\t\t{\"sect/doc1.html\", []byte(\"<!doctype html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\")},\n", "\t\t{\"content/blue/doc2.html\", []byte(\"---\\nf: t\\n---\\n<!doctype html><html><body>more content</body></html>\")},\n", "\t}\n" ], "file_path": "hugolib/site_test.go", "type": "add", "edit_start_line_idx": 250 }
package transform

import (
	htmltran "code.google.com/p/go-html-transform/html/transform"
	"io"
	"net/url"
)

type Transformer struct {
	BaseURL string
}

func (t *Transformer) Apply(r io.Reader, w io.Writer) (err error) {
	var tr *htmltran.Transformer

	if tr, err = htmltran.NewFromReader(r); err != nil {
		return
	}

	if err = t.absUrlify(tr, elattr{"a", "href"}, elattr{"script", "src"}); err != nil {
		return
	}

	return tr.Render(w)
}

type elattr struct {
	tag, attr string
}

func (t *Transformer) absUrlify(tr *htmltran.Transformer, selectors ...elattr) (err error) {
	var baseURL, inURL *url.URL

	if baseURL, err = url.Parse(t.BaseURL); err != nil {
		return
	}

	replace := func(in string) string {
		if inURL, err = url.Parse(in); err != nil {
			return in + "?"
		}
		return baseURL.ResolveReference(inURL).String()
	}

	for _, el := range selectors {
		if err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {
			return
		}
	}

	return
}
transform/post.go
1
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.012161735445261002, 0.002332675503566861, 0.00016614393098279834, 0.00017407901759725064, 0.004410333465784788 ]
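For context when reading the Transformer defined in transform/post.go above, here is a small usage sketch. Only the Transformer type and its Apply(io.Reader, io.Writer) signature come from the file; the import path and the sample HTML/BaseURL (taken from the AbsUrlify test elsewhere in this commit) are assumptions.

package main

import (
	"bytes"
	"os"
	"strings"

	"github.com/spf13/hugo/transform" // assumed import path for the package shown above
)

func main() {
	// Rewrite relative hrefs/srcs against the site's base URL.
	t := &transform.Transformer{BaseURL: "http://auth/bub/"}

	in := strings.NewReader(`<html><head></head><body><a href="foobar.jpg">Going</a></body></html>`)
	out := new(bytes.Buffer)

	if err := t.Apply(in, out); err != nil {
		panic(err)
	}
	os.Stdout.Write(out.Bytes()) // the href becomes http://auth/bub/foobar.jpg
}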
{ "id": 0, "code_window": [ "\n", "func TestAbsUrlify(t *testing.T) {\n", "\tfiles := make(map[string][]byte)\n", "\ttarget := &InMemoryTarget{files: files}\n", "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tsources := []byteSource{\n", "\t\t{\"sect/doc1.html\", []byte(\"<!doctype html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\")},\n", "\t\t{\"content/blue/doc2.html\", []byte(\"---\\nf: t\\n---\\n<!doctype html><html><body>more content</body></html>\")},\n", "\t}\n" ], "file_path": "hugolib/site_test.go", "type": "add", "edit_start_line_idx": 250 }
package target

import (
	"bytes"
	helpers "github.com/spf13/hugo/template"
	"html/template"
	"path"
	"strings"
)

const ALIAS = "<!DOCTYPE html><html><head><link rel=\"canonical\" href=\"{{ .Permalink }}\"/><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\" /><meta http-equiv=\"refresh\" content=\"0;url={{ .Permalink }}\" /></head></html>"
const ALIAS_XHTML = "<!DOCTYPE html><html xmlns=\"http://www.w3.org/1999/xhtml\"><head><link rel=\"canonical\" href=\"{{ .Permalink }}\"/><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\" /><meta http-equiv=\"refresh\" content=\"0;url={{ .Permalink }}\" /></head></html>"

var DefaultAliasTemplates *template.Template

func init() {
	DefaultAliasTemplates = template.New("")
	template.Must(DefaultAliasTemplates.New("alias").Parse(ALIAS))
	template.Must(DefaultAliasTemplates.New("alias-xhtml").Parse(ALIAS_XHTML))
}

type AliasPublisher interface {
	Translator
	Publish(string, template.HTML) error
}

type HTMLRedirectAlias struct {
	PublishDir string
	Templates  *template.Template
}

func (h *HTMLRedirectAlias) Translate(alias string) (aliasPath string, err error) {
	if len(alias) <= 0 {
		return
	}

	if strings.HasSuffix(alias, "/") {
		alias = alias + "index.html"
	} else if !strings.HasSuffix(alias, ".html") {
		alias = alias + "/index.html"
	}
	return path.Join(h.PublishDir, helpers.Urlize(alias)), nil
}

type AliasNode struct {
	Permalink template.HTML
}

func (h *HTMLRedirectAlias) Publish(path string, permalink template.HTML) (err error) {
	if path, err = h.Translate(path); err != nil {
		return
	}

	t := "alias"
	if strings.HasSuffix(path, ".xhtml") {
		t = "alias-xhtml"
	}

	template := DefaultAliasTemplates
	if h.Templates != nil {
		template = h.Templates
	}

	buffer := new(bytes.Buffer)
	err = template.ExecuteTemplate(buffer, t, &AliasNode{permalink})
	if err != nil {
		return
	}
	return writeToDisk(path, buffer)
}
target/htmlredirect.go
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.0008259891183115542, 0.00025254025240428746, 0.00016448600217700005, 0.0001710697397356853, 0.00021677158656530082 ]
{ "id": 0, "code_window": [ "\n", "func TestAbsUrlify(t *testing.T) {\n", "\tfiles := make(map[string][]byte)\n", "\ttarget := &InMemoryTarget{files: files}\n", "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tsources := []byteSource{\n", "\t\t{\"sect/doc1.html\", []byte(\"<!doctype html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\")},\n", "\t\t{\"content/blue/doc2.html\", []byte(\"---\\nf: t\\n---\\n<!doctype html><html><body>more content</body></html>\")},\n", "\t}\n" ], "file_path": "hugolib/site_test.go", "type": "add", "edit_start_line_idx": 250 }
package parser import ( "bufio" "bytes" "errors" "fmt" "io" "unicode" ) const ( HTML_LEAD = "<" YAML_LEAD = "-" YAML_DELIM_UNIX = "---\n" YAML_DELIM_DOS = "---\r\n" TOML_LEAD = "+" TOML_DELIM_UNIX = "+++\n" TOML_DELIM_DOS = "+++\r\n" JAVA_LEAD = "{" ) var ( delims = [][]byte{ []byte(YAML_DELIM_UNIX), []byte(YAML_DELIM_DOS), []byte(TOML_DELIM_UNIX), []byte(TOML_DELIM_DOS), []byte(JAVA_LEAD), } unixEnding = []byte("\n") dosEnding = []byte("\r\n") ) type FrontMatter []byte type Content []byte type Page interface { FrontMatter() FrontMatter Content() Content IsRenderable() bool } type page struct { render bool frontmatter FrontMatter content Content } func (p *page) Content() Content { return p.content } func (p *page) FrontMatter() FrontMatter { return p.frontmatter } func (p *page) IsRenderable() bool { return p.render } // ReadFrom reads the content from an io.Reader and constructs a page. func ReadFrom(r io.Reader) (p Page, err error) { reader := bufio.NewReader(r) if err = chompWhitespace(reader); err != nil { return } firstLine, err := peekLine(reader) if err != nil { return } newp := new(page) newp.render = shouldRender(firstLine) if newp.render && isFrontMatterDelim(firstLine) { left, right := determineDelims(firstLine) fm, err := extractFrontMatterDelims(reader, left, right) if err != nil { return nil, err } newp.frontmatter = fm } content, err := extractContent(reader) if err != nil { return nil, err } newp.content = content return newp, nil } func chompWhitespace(r io.RuneScanner) (err error) { for { c, _, err := r.ReadRune() if err != nil { return err } if !unicode.IsSpace(c) { r.UnreadRune() return nil } } return } func peekLine(r *bufio.Reader) (line []byte, err error) { firstFive, err := r.Peek(5) if err != nil { return } idx := bytes.IndexByte(firstFive, '\n') if idx == -1 { return firstFive, nil } idx += 1 // include newline. return firstFive[:idx], nil } func shouldRender(lead []byte) (frontmatter bool) { if len(lead) <= 0 { return } if bytes.Equal(lead[:1], []byte(HTML_LEAD)) { return } return true } func isFrontMatterDelim(data []byte) bool { for _, d := range delims { if bytes.HasPrefix(data, d) { return true } } return false } func determineDelims(firstLine []byte) (left, right []byte) { switch len(firstLine) { case 4: if firstLine[0] == YAML_LEAD[0] { return []byte(YAML_DELIM_UNIX), []byte(YAML_DELIM_UNIX) } return []byte(TOML_DELIM_UNIX), []byte(TOML_DELIM_UNIX) case 5: if firstLine[0] == YAML_LEAD[0] { return []byte(YAML_DELIM_DOS), []byte(YAML_DELIM_DOS) } return []byte(TOML_DELIM_DOS), []byte(TOML_DELIM_DOS) case 3: fallthrough case 2: fallthrough case 1: return []byte(JAVA_LEAD), []byte("}") default: panic(fmt.Sprintf("Unable to determine delims from %q", firstLine)) } return } func extractFrontMatterDelims(r *bufio.Reader, left, right []byte) (fm FrontMatter, err error) { var ( c byte level int = 0 bytesRead int = 0 sameDelim = bytes.Equal(left, right) ) wr := new(bytes.Buffer) for { if c, err = r.ReadByte(); err != nil { return nil, fmt.Errorf("Unable to read frontmatter at filepos %d: %s", bytesRead, err) } bytesRead += 1 switch c { case left[0]: var ( buf []byte = []byte{c} remaining []byte ) if remaining, err = r.Peek(len(left) - 1); err != nil { return nil, err } buf = append(buf, remaining...) 
if bytes.Equal(buf, left) { if sameDelim { if level == 0 { level = 1 } else { level = 0 } } else { level += 1 } } if _, err = wr.Write([]byte{c}); err != nil { return nil, err } if level == 0 { if _, err = r.Read(remaining); err != nil { return nil, err } if _, err = wr.Write(remaining); err != nil { return nil, err } } case right[0]: match, err := matches(r, wr, []byte{c}, right) if err != nil { return nil, err } if match { level -= 1 } default: if err = wr.WriteByte(c); err != nil { return nil, err } } if level == 0 && !unicode.IsSpace(rune(c)) { if err = chompWhitespace(r); err != nil { if err != io.EOF { return nil, err } } return wr.Bytes(), nil } } return nil, errors.New("Could not find front matter.") } func matches_quick(buf, expected []byte) (ok bool, err error) { return bytes.Equal(expected, buf), nil } func matches(r *bufio.Reader, wr io.Writer, c, expected []byte) (ok bool, err error) { if len(expected) == 1 { if _, err = wr.Write(c); err != nil { return } return bytes.Equal(c, expected), nil } buf := make([]byte, len(expected)-1) if buf, err = r.Peek(len(expected) - 1); err != nil { return } buf = append(c, buf...) return bytes.Equal(expected, buf), nil } func extractContent(r io.Reader) (content Content, err error) { wr := new(bytes.Buffer) if _, err = wr.ReadFrom(r); err != nil { return } return wr.Bytes(), nil }
parser/page.go
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.000175253429915756, 0.00017206072516273707, 0.00016528104606550187, 0.00017289104289375246, 0.0000026875704861595295 ]
{ "id": 0, "code_window": [ "\n", "func TestAbsUrlify(t *testing.T) {\n", "\tfiles := make(map[string][]byte)\n", "\ttarget := &InMemoryTarget{files: files}\n", "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n" ], "labels": [ "keep", "keep", "keep", "add", "keep", "keep", "keep" ], "after_edit": [ "\tsources := []byteSource{\n", "\t\t{\"sect/doc1.html\", []byte(\"<!doctype html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\")},\n", "\t\t{\"content/blue/doc2.html\", []byte(\"---\\nf: t\\n---\\n<!doctype html><html><body>more content</body></html>\")},\n", "\t}\n" ], "file_path": "hugolib/site_test.go", "type": "add", "edit_start_line_idx": 250 }
--- title: "Release Notes" date: "2013-07-01" aliases: ["/doc/release-notes/"] --- * **0.9.0** HEAD * Added support for aliases (redirects) * Cleanup of how content organization is handled * Support for top level pages (other than homepage) * Loads of unit and performance tests * Integration with travis ci * Complete overhaul of the documentation site * Full Windows support * **0.8.0** August 2, 2013 * Added support for pretty urls (filename/index.html vs filename.html) * Hugo supports a destination directory * Will efficiently sync content in static to destination directory * Cleaned up options.. now with support for short and long options * Added support for TOML * Added support for YAML * Added support for Previous & Next * Added support for indexes for the indexes * Better Windows compatibility * Support for series * Adding verbose output * Loads of bugfixes * **0.7.0** July 4, 2013 * Hugo now includes a simple server * First public release * **0.6.0** July 2, 2013 * Hugo includes an example documentation site which it builds * **0.5.0** June 25, 2013 * Hugo is quite usable and able to build spf13.com
docs/content/meta/release-notes.md
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.00017548703181091696, 0.00017076659423764795, 0.00016764842439442873, 0.00016996546764858067, 0.0000029459117740771035 ]
{ "id": 1, "code_window": [ "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n", "\t\tSource: &inMemorySource{urlFakeSource},\n", "\t}\n", "\ts.initializeSiteInfo()\n", "\ts.prepTemplates()\n", "\tmust(s.addTemplate(\"blue/single.html\", TEMPLATE_WITH_URL))\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tSource: &inMemorySource{sources},\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 253 }
package hugolib import ( "bytes" "fmt" "html/template" "strings" "testing" ) const ( TEMPLATE_TITLE = "{{ .Title }}" PAGE_SIMPLE_TITLE = `--- title: simple template --- content` TEMPLATE_MISSING_FUNC = "{{ .Title | funcdoesnotexists }}" TEMPLATE_FUNC = "{{ .Title | urlize }}" TEMPLATE_CONTENT = "{{ .Content }}" TEMPLATE_DATE = "{{ .Date }}" INVALID_TEMPLATE_FORMAT_DATE = "{{ .Date.Format time.RFC3339 }}" TEMPLATE_WITH_URL = "<a href=\"foobar.jpg\">Going</a>" PAGE_URL_SPECIFIED = `--- title: simple template url: "mycategory/my-whatever-content/" --- content` PAGE_WITH_MD = `--- title: page with md --- # heading 1 text ## heading 2 more text ` ) func pageMust(p *Page, err error) *Page { if err != nil { panic(err) } return p } func TestDegenerateRenderThingMissingTemplate(t *testing.T) { p, _ := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md") s := new(Site) s.prepTemplates() _, err := s.RenderThing(p, "foobar") if err == nil { t.Errorf("Expected err to be returned when missing the template.") } } func TestAddInvalidTemplate(t *testing.T) { s := new(Site) s.prepTemplates() err := s.addTemplate("missing", TEMPLATE_MISSING_FUNC) if err == nil { t.Fatalf("Expecting the template to return an error") } } func matchRender(t *testing.T, s *Site, p *Page, tmplName string, expected string) { content, err := s.RenderThing(p, tmplName) if err != nil { t.Fatalf("Unable to render template.") } if string(content.Bytes()) != expected { t.Fatalf("Content did not match expected: %s. got: %s", expected, content) } } func _TestAddSameTemplateTwice(t *testing.T) { p := pageMust(ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), "content/a/file.md")) s := new(Site) s.prepTemplates() err := s.addTemplate("foo", TEMPLATE_TITLE) if err != nil { t.Fatalf("Unable to add template foo") } matchRender(t, s, p, "foo", "simple template") err = s.addTemplate("foo", "NEW {{ .Title }}") if err != nil { t.Fatalf("Unable to add template foo: %s", err) } matchRender(t, s, p, "foo", "NEW simple template") } func TestRenderThing(t *testing.T) { tests := []struct { content string template string expected string }{ {PAGE_SIMPLE_TITLE, TEMPLATE_TITLE, "simple template"}, {PAGE_SIMPLE_TITLE, TEMPLATE_FUNC, "simple-template"}, {PAGE_WITH_MD, TEMPLATE_CONTENT, "<h1>heading 1</h1>\n\n<p>text</p>\n\n<h2>heading 2</h2>\n\n<p>more text</p>\n"}, {SIMPLE_PAGE_RFC3339_DATE, TEMPLATE_DATE, "2013-05-17 16:59:30 &#43;0000 UTC"}, } s := new(Site) s.prepTemplates() for i, test := range tests { p, err := ReadFrom(strings.NewReader(test.content), "content/a/file.md") if err != nil { t.Fatalf("Error parsing buffer: %s", err) } templateName := fmt.Sprintf("foobar%d", i) err = s.addTemplate(templateName, test.template) if err != nil { t.Fatalf("Unable to add template") } p.Content = template.HTML(p.Content) html, err2 := s.RenderThing(p, templateName) if err2 != nil { t.Errorf("Unable to render html: %s", err) } if string(html.Bytes()) != test.expected { t.Errorf("Content does not match.\nExpected\n\t'%q'\ngot\n\t'%q'", test.expected, html) } } } func TestRenderThingOrDefault(t *testing.T) { tests := []struct { content string missing bool template string expected string }{ {PAGE_SIMPLE_TITLE, true, TEMPLATE_TITLE, "simple template"}, {PAGE_SIMPLE_TITLE, true, TEMPLATE_FUNC, "simple-template"}, {PAGE_SIMPLE_TITLE, false, TEMPLATE_TITLE, "simple template"}, {PAGE_SIMPLE_TITLE, false, TEMPLATE_FUNC, "simple-template"}, } s := new(Site) s.prepTemplates() for i, test := range tests { p, err := ReadFrom(strings.NewReader(PAGE_SIMPLE_TITLE), 
"content/a/file.md") if err != nil { t.Fatalf("Error parsing buffer: %s", err) } templateName := fmt.Sprintf("default%d", i) err = s.addTemplate(templateName, test.template) if err != nil { t.Fatalf("Unable to add template") } var html *bytes.Buffer var err2 error if test.missing { html, err2 = s.RenderThingOrDefault(p, "missing", templateName) } else { html, err2 = s.RenderThingOrDefault(p, templateName, "missing_default") } if err2 != nil { t.Errorf("Unable to render html: %s", err) } if string(html.Bytes()) != test.expected { t.Errorf("Content does not match. Expected '%s', got '%s'", test.expected, html) } } } func TestSetOutFile(t *testing.T) { s := new(Site) p := pageMust(ReadFrom(strings.NewReader(PAGE_URL_SPECIFIED), "content/a/file.md")) s.setOutFile(p) expected := "mycategory/my-whatever-content/index.html" if p.OutFile != "mycategory/my-whatever-content/index.html" { t.Errorf("Outfile does not match. Expected '%s', got '%s'", expected, p.OutFile) } } func TestSkipRender(t *testing.T) { files := make(map[string][]byte) target := &InMemoryTarget{files: files} sources := []byteSource{ {"sect/doc1.html", []byte("---\nmarkup: markdown\n---\n# title\nsome *content*")}, {"sect/doc2.html", []byte("<!doctype html><html><body>more content</body></html>")}, {"sect/doc3.md", []byte("# doc3\n*some* content")}, {"sect/doc4.md", []byte("---\ntitle: doc4\n---\n# doc4\n*some content*")}, {"sect/doc5.html", []byte("<!doctype html><html>{{ template \"head\" }}<body>body5</body></html>")}, } s := &Site{ Target: target, Config: Config{BaseUrl: "http://auth/bub/"}, Source: &inMemorySource{sources}, } s.initializeSiteInfo() s.prepTemplates() must(s.addTemplate("_default/single.html", "{{.Content}}")) must(s.addTemplate("head", "<head><script src=\"script.js\"></script></head>")) if err := s.CreatePages(); err != nil { t.Fatalf("Unable to create pages: %s", err) } if err := s.BuildSiteMeta(); err != nil { t.Fatalf("Unable to build site metadata: %s", err) } if err := s.RenderPages(); err != nil { t.Fatalf("Unable to render pages. %s", err) } tests := []struct { doc string expected string }{ {"sect/doc1.html", "<html><head></head><body><h1>title</h1>\n\n<p>some <em>content</em></p>\n</body></html>"}, {"sect/doc2.html", "<!DOCTYPE html><html><head></head><body>more content</body></html>"}, {"sect/doc3.html", "<html><head></head><body><h1>doc3</h1>\n\n<p><em>some</em> content</p>\n</body></html>"}, {"sect/doc4.html", "<html><head></head><body><h1>doc4</h1>\n\n<p><em>some content</em></p>\n</body></html>"}, {"sect/doc5.html", "<!DOCTYPE html><html><head><script src=\"http://auth/bub/script.js\"></script></head><body>body5</body></html>"}, } for _, test := range tests { content, ok := target.files[test.doc] if !ok { t.Fatalf("Did not find %s in target. %v", test.doc, target.files) } if !bytes.Equal(content, []byte(test.expected)) { t.Errorf("%s content expected:\n%q\ngot:\n%q", test.doc, test.expected, string(content)) } } } func TestAbsUrlify(t *testing.T) { files := make(map[string][]byte) target := &InMemoryTarget{files: files} s := &Site{ Target: target, Config: Config{BaseUrl: "http://auth/bub/"}, Source: &inMemorySource{urlFakeSource}, } s.initializeSiteInfo() s.prepTemplates() must(s.addTemplate("blue/single.html", TEMPLATE_WITH_URL)) if err := s.CreatePages(); err != nil { t.Fatalf("Unable to create pages: %s", err) } if err := s.BuildSiteMeta(); err != nil { t.Fatalf("Unable to build site metadata: %s", err) } if err := s.RenderPages(); err != nil { t.Fatalf("Unable to render pages. 
%s", err) } content, ok := target.files["content/blue/slug-doc-1.html"] if !ok { t.Fatalf("Unable to locate rendered content") } expected := "<html><head></head><body><a href=\"http://auth/bub/foobar.jpg\">Going</a></body></html>" if string(content) != expected { t.Errorf("AbsUrlify content expected:\n%q\ngot\n%q", expected, string(content)) } }
hugolib/site_test.go
1
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.998354434967041, 0.3863343298435211, 0.00016765593318268657, 0.007949786260724068, 0.44080865383148193 ]
{ "id": 1, "code_window": [ "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n", "\t\tSource: &inMemorySource{urlFakeSource},\n", "\t}\n", "\ts.initializeSiteInfo()\n", "\ts.prepTemplates()\n", "\tmust(s.addTemplate(\"blue/single.html\", TEMPLATE_WITH_URL))\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tSource: &inMemorySource{sources},\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 253 }
// Copyright © 2013 Steve Francia <[email protected]>. // // Licensed under the Simple Public License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://opensource.org/licenses/Simple-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package hugolib import ( "bitbucket.org/pkg/inflect" "bytes" "fmt" "github.com/spf13/hugo/source" "github.com/spf13/hugo/target" helpers "github.com/spf13/hugo/template" "github.com/spf13/hugo/template/bundle" "github.com/spf13/hugo/transform" "github.com/spf13/nitro" "html/template" "io" "os" "path" "strings" "time" ) var DefaultTimer = nitro.Initalize() func MakePermalink(domain string, path string) string { return strings.TrimRight(domain, "/") + "/" + strings.TrimLeft(path, "/") } func mkdirIf(path string) error { return os.MkdirAll(path, 0777) } func FatalErr(str string) { fmt.Println(str) os.Exit(1) } func PrintErr(str string, a ...interface{}) { fmt.Fprintln(os.Stderr, str, a) } // Site contains all the information relevent for constructing a static // site. The basic flow of information is as follows: // // 1. A list of Files is parsed and then converted into Pages. // // 2. Pages contain sections (based on the file they were generated from), // aliases and slugs (included in a pages frontmatter) which are the // various targets that will get generated. There will be canonical // listing. // // 3. Indexes are created via configuration and will present some aspect of // the final page and typically a perm url. // // 4. All Pages are passed through a template based on their desired // layout based on numerous different elements. // // 5. The entire collection of files is written to disk. 
type Site struct { Config Config Pages Pages Tmpl bundle.Template Indexes IndexList Source source.Input Sections Index Info SiteInfo Shortcodes map[string]ShortcodeFunc timer *nitro.B Transformer *transform.Transformer Target target.Output Alias target.AliasPublisher } type SiteInfo struct { BaseUrl template.URL Indexes OrderedIndexList Recent *Pages LastChange time.Time Title string Config *Config } func (s *Site) getFromIndex(kind string, name string) Pages { return s.Indexes[kind][name] } func (s *Site) timerStep(step string) { if s.timer == nil { s.timer = DefaultTimer } s.timer.Step(step) } func (s *Site) Build() (err error) { if err = s.Process(); err != nil { return } if err = s.Render(); err != nil { fmt.Printf("Error rendering site: %s\nAvailable templates:\n", err) for _, template := range s.Tmpl.Templates() { fmt.Printf("\t%s\n", template.Name()) } return } return nil } func (s *Site) Analyze() { s.Process() s.initTarget() s.Alias = &target.HTMLRedirectAlias{ PublishDir: s.absPublishDir(), } s.ShowPlan(os.Stdout) } func (s *Site) prepTemplates() { s.Tmpl = bundle.NewTemplate() s.Tmpl.LoadTemplates(s.absLayoutDir()) } func (s *Site) addTemplate(name, data string) error { return s.Tmpl.AddTemplate(name, data) } func (s *Site) Process() (err error) { s.initialize() s.prepTemplates() s.timerStep("initialize & template prep") if err = s.CreatePages(); err != nil { return err } s.setupPrevNext() s.timerStep("import pages") if err = s.BuildSiteMeta(); err != nil { return } s.timerStep("build indexes") return } func (s *Site) Render() (err error) { if err = s.RenderAliases(); err != nil { return } s.timerStep("render and write aliases") s.ProcessShortcodes() s.timerStep("render shortcodes") s.timerStep("absolute URLify") if err = s.RenderIndexes(); err != nil { return } s.RenderIndexesIndexes() s.timerStep("render and write indexes") s.RenderLists() s.timerStep("render and write lists") if err = s.RenderPages(); err != nil { return } s.timerStep("render and write pages") if err = s.RenderHomePage(); err != nil { return } s.timerStep("render and write homepage") return } func (s *Site) checkDescriptions() { for _, p := range s.Pages { if len(p.Description) < 60 { fmt.Println(p.FileName + " ") } } } func (s *Site) initialize() { s.checkDirectories() staticDir := s.Config.GetAbsPath(s.Config.StaticDir + "/") s.Source = &source.Filesystem{ AvoidPaths: []string{staticDir}, Base: s.absContentDir(), } s.initializeSiteInfo() s.Shortcodes = make(map[string]ShortcodeFunc) } func (s *Site) initializeSiteInfo() { s.Info = SiteInfo{ BaseUrl: template.URL(s.Config.BaseUrl), Title: s.Config.Title, Recent: &s.Pages, Config: &s.Config, } } // Check if File / Directory Exists func exists(path string) (bool, error) { _, err := os.Stat(path) if err == nil { return true, nil } if os.IsNotExist(err) { return false, nil } return false, err } func (s *Site) absLayoutDir() string { return s.Config.GetAbsPath(s.Config.LayoutDir) } func (s *Site) absContentDir() string { return s.Config.GetAbsPath(s.Config.ContentDir) } func (s *Site) absPublishDir() string { return s.Config.GetAbsPath(s.Config.PublishDir) } func (s *Site) checkDirectories() { if b, _ := dirExists(s.absLayoutDir()); !b { FatalErr("No layout directory found, expecting to find it at " + s.absLayoutDir()) } if b, _ := dirExists(s.absContentDir()); !b { FatalErr("No source directory found, expecting to find it at " + s.absContentDir()) } mkdirIf(s.absPublishDir()) } func (s *Site) ProcessShortcodes() { for _, page := range s.Pages { page.Content = 
template.HTML(ShortcodesHandle(string(page.Content), page, s.Tmpl)) } } func (s *Site) CreatePages() (err error) { for _, file := range s.Source.Files() { page, err := ReadFrom(file.Contents, file.Name) if err != nil { return err } page.Site = s.Info page.Tmpl = s.Tmpl if err = s.setUrlPath(page); err != nil { return err } s.setOutFile(page) if s.Config.BuildDrafts || !page.Draft { s.Pages = append(s.Pages, page) } } s.Pages.Sort() return } func (s *Site) setupPrevNext() { for i, page := range s.Pages { if i < len(s.Pages)-1 { page.Next = s.Pages[i+1] } if i > 0 { page.Prev = s.Pages[i-1] } } } func (s *Site) setUrlPath(p *Page) error { y := strings.TrimPrefix(p.FileName, s.absContentDir()) x := strings.Split(y, "/") if len(x) <= 1 { return fmt.Errorf("Zero length page name. filename: %s", y) } p.Section = strings.Trim(x[1], "/") p.Path = path.Join(x[:len(x)-1]...) return nil } // If Url is provided it is assumed to be the complete relative path // and will override everything // Otherwise path + slug is used if provided // Lastly path + filename is used if provided func (s *Site) setOutFile(p *Page) { // Always use Url if it's specified if len(strings.TrimSpace(p.Url)) > 2 { p.OutFile = strings.TrimSpace(p.Url) if strings.HasSuffix(p.OutFile, "/") { p.OutFile = p.OutFile + "index.html" } return } var outfile string if len(strings.TrimSpace(p.Slug)) > 0 { outfile = strings.TrimSpace(p.Slug) + "." + p.Extension } else { // Fall back to filename _, t := path.Split(p.FileName) outfile = replaceExtension(strings.TrimSpace(t), p.Extension) } p.OutFile = p.Path + "/" + strings.TrimSpace(outfile) } func (s *Site) BuildSiteMeta() (err error) { s.Indexes = make(IndexList) s.Sections = make(Index) for _, plural := range s.Config.Indexes { s.Indexes[plural] = make(Index) for _, p := range s.Pages { vals := p.GetParam(plural) if vals != nil { v, ok := vals.([]string) if ok { for _, idx := range v { s.Indexes[plural].Add(idx, p) } } else { PrintErr("Invalid " + plural + " in " + p.File.FileName) } } } for k, _ := range s.Indexes[plural] { s.Indexes[plural][k].Sort() } } for _, p := range s.Pages { s.Sections.Add(p.Section, p) } for k, _ := range s.Sections { s.Sections[k].Sort() } s.Info.Indexes = s.Indexes.BuildOrderedIndexList() if len(s.Pages) == 0 { return } s.Info.LastChange = s.Pages[0].Date // populate pages with site metadata for _, p := range s.Pages { p.Site = s.Info } return } func (s *Site) possibleIndexes() (indexes []string) { for _, p := range s.Pages { for k, _ := range p.Params { if !inStringArray(indexes, k) { indexes = append(indexes, k) } } } return } func inStringArray(arr []string, el string) bool { for _, v := range arr { if v == el { return true } } return false } func (s *Site) RenderAliases() error { for _, p := range s.Pages { for _, a := range p.Aliases { if err := s.WriteAlias(a, p.Permalink()); err != nil { return err } } } return nil } func (s *Site) RenderPages() (err error) { for _, p := range s.Pages { var layout string if !p.IsRenderable() { layout = "__" + p.FileName _, err := s.Tmpl.New(layout).Parse(string(p.Content)) if err != nil { return err } } else { layout = p.Layout() } content, err := s.RenderThingOrDefault(p, layout, "_default/single.html") if err != nil { return err } err = s.WritePublic(p.OutFile, content) if err != nil { return err } } return nil } func (s *Site) RenderIndexes() error { for singular, plural := range s.Config.Indexes { for k, o := range s.Indexes[plural] { n := s.NewNode() n.Title = strings.Title(k) url := helpers.Urlize(plural + "/" + 
k) n.Url = url + ".html" plink := n.Url n.Permalink = permalink(s, plink) n.RSSlink = permalink(s, url+".xml") n.Date = o[0].Date n.Data[singular] = o n.Data["Pages"] = o layout := "indexes/" + singular + ".html" x, err := s.RenderThing(n, layout) if err != nil { return err } var base string base = plural + "/" + k err = s.WritePublic(base+".html", x) if err != nil { return err } if a := s.Tmpl.Lookup("rss.xml"); a != nil { // XML Feed y := s.NewXMLBuffer() n.Url = helpers.Urlize(plural + "/" + k + ".xml") n.Permalink = permalink(s, n.Url) s.Tmpl.ExecuteTemplate(y, "rss.xml", n) err = s.WritePublic(base+".xml", y) if err != nil { return err } } } } return nil } func (s *Site) RenderIndexesIndexes() (err error) { layout := "indexes/indexes.html" if s.Tmpl.Lookup(layout) != nil { for singular, plural := range s.Config.Indexes { n := s.NewNode() n.Title = strings.Title(plural) url := helpers.Urlize(plural) n.Url = url + "/index.html" n.Permalink = permalink(s, n.Url) n.Data["Singular"] = singular n.Data["Plural"] = plural n.Data["Index"] = s.Indexes[plural] n.Data["OrderedIndex"] = s.Info.Indexes[plural] x, err := s.RenderThing(n, layout) if err != nil { return err } err = s.WritePublic(plural+"/index.html", x) if err != nil { return err } } } return } func (s *Site) RenderLists() error { for section, data := range s.Sections { n := s.NewNode() n.Title = strings.Title(inflect.Pluralize(section)) n.Url = helpers.Urlize(section + "/" + "index.html") n.Permalink = permalink(s, n.Url) n.RSSlink = permalink(s, section+".xml") n.Date = data[0].Date n.Data["Pages"] = data layout := "indexes/" + section + ".html" content, err := s.RenderThingOrDefault(n, layout, "_default/index.html") if err != nil { return err } err = s.WritePublic(section, content) if err != nil { return err } if a := s.Tmpl.Lookup("rss.xml"); a != nil { // XML Feed n.Url = helpers.Urlize(section + ".xml") n.Permalink = template.HTML(string(n.Site.BaseUrl) + n.Url) y := s.NewXMLBuffer() s.Tmpl.ExecuteTemplate(y, "rss.xml", n) err = s.WritePublic(section+"/index.xml", y) return err } } return nil } func (s *Site) RenderHomePage() error { n := s.NewNode() n.Title = n.Site.Title n.Url = helpers.Urlize(string(n.Site.BaseUrl)) n.RSSlink = permalink(s, "index.xml") n.Permalink = permalink(s, "") if len(s.Pages) > 0 { n.Date = s.Pages[0].Date if len(s.Pages) < 9 { n.Data["Pages"] = s.Pages } else { n.Data["Pages"] = s.Pages[:9] } } x, err := s.RenderThing(n, "index.html") if err != nil { return err } err = s.WritePublic("/", x) if err != nil { return err } if a := s.Tmpl.Lookup("rss.xml"); a != nil { // XML Feed n.Url = helpers.Urlize("index.xml") n.Title = "Recent Content" n.Permalink = permalink(s, "index.xml") y := s.NewXMLBuffer() s.Tmpl.ExecuteTemplate(y, "rss.xml", n) err = s.WritePublic("index.xml", y) return err } if a := s.Tmpl.Lookup("404.html"); a != nil { n.Url = helpers.Urlize("404.html") n.Title = "404 Page not found" n.Permalink = permalink(s, "404.html") x, err := s.RenderThing(n, "404.html") if err != nil { return err } err = s.WritePublic("404.html", x) return err } return nil } func (s *Site) Stats() { fmt.Printf("%d pages created \n", len(s.Pages)) for _, pl := range s.Config.Indexes { fmt.Printf("%d %s index created\n", len(s.Indexes[pl]), pl) } } func permalink(s *Site, plink string) template.HTML { return template.HTML(MakePermalink(string(s.Info.BaseUrl), plink)) } func (s *Site) NewNode() *Node { return &Node{ Data: make(map[string]interface{}), Site: s.Info, } } func (s *Site) RenderThing(d interface{}, layout 
string) (*bytes.Buffer, error) { if s.Tmpl.Lookup(layout) == nil { return nil, fmt.Errorf("Layout not found: %s", layout) } buffer := new(bytes.Buffer) err := s.Tmpl.ExecuteTemplate(buffer, layout, d) return buffer, err } func (s *Site) RenderThingOrDefault(d interface{}, layout string, defaultLayout string) (*bytes.Buffer, error) { content, err := s.RenderThing(d, layout) if err != nil { var err2 error content, err2 = s.RenderThing(d, defaultLayout) if err2 == nil { return content, err2 } } return content, err } func (s *Site) NewXMLBuffer() *bytes.Buffer { header := "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\" ?>\n" return bytes.NewBufferString(header) } func (s *Site) initTarget() { if s.Target == nil { s.Target = &target.Filesystem{ PublishDir: s.absPublishDir(), UglyUrls: s.Config.UglyUrls, } } } func (s *Site) WritePublic(path string, content io.Reader) (err error) { s.initTarget() if s.Config.Verbose { fmt.Println(path) } if s.Transformer == nil { s.Transformer = &transform.Transformer{BaseURL: s.Config.BaseUrl} } final := new(bytes.Buffer) s.Transformer.Apply(content, final) return s.Target.Publish(path, final) } func (s *Site) WriteAlias(path string, permalink template.HTML) (err error) { if s.Alias == nil { s.initTarget() s.Alias = &target.HTMLRedirectAlias{ PublishDir: s.absPublishDir(), } } if s.Config.Verbose { fmt.Println(path) } return s.Alias.Publish(path, permalink) }
hugolib/site.go
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.9941940903663635, 0.3193369507789612, 0.0001661249261815101, 0.02646700292825699, 0.41341906785964966 ]
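The site_test.go hunks in this dump build a Site by hand with `Target`, `Config`, and `Source: &inMemorySource{...}`, and the site.go above only ever asks that source for Files() and then reads each file's Contents and Name inside CreatePages. Below is a minimal, purely illustrative stand-in for such a source; the type and field names are made up for the sketch and are not Hugo's real source package.

package main

import (
	"fmt"
	"io"
	"strings"
)

// memFile carries the only two things CreatePages reads from a source file:
// the raw page content as an io.Reader and the file's name.
type memFile struct {
	Contents io.Reader
	Name     string
}

// inMemorySource stands in for a filesystem-backed source: Files() simply
// returns the fixtures it was constructed with.
type inMemorySource struct {
	files []memFile
}

func (s *inMemorySource) Files() []memFile { return s.files }

func main() {
	src := &inMemorySource{files: []memFile{
		{Name: "sect/doc1.md", Contents: strings.NewReader("---\ntitle: doc1\n---\nhello")},
	}}
	for _, f := range src.Files() {
		body, _ := io.ReadAll(f.Contents)
		fmt.Printf("%s: %d bytes\n", f.Name, len(body))
	}
}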
{ "id": 1, "code_window": [ "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n", "\t\tSource: &inMemorySource{urlFakeSource},\n", "\t}\n", "\ts.initializeSiteInfo()\n", "\ts.prepTemplates()\n", "\tmust(s.addTemplate(\"blue/single.html\", TEMPLATE_WITH_URL))\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tSource: &inMemorySource{sources},\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 253 }
+++ title = "Front Matter" date = "2013-07-01" aliases = ["/doc/front-matter/"] +++ The front matter is one of the features that gives Hugo it's strength. It enables you to include the meta data of the content right with it. Hugo supports a few different formats each with their own identifying tokens. Supported formats: <br> **YAML**, identified by '\-\-\-'. <br> **TOML**, indentified with '+++'.<br> **JSON**, a single JSON object which is surrounded by '{' and '}' each on their own line. ### YAML Example --- title: "spf13-vim 3.0 release and new website" description: "spf13-vim is a cross platform distribution of vim plugins and resources for Vim." tags: [ ".vimrc", "plugins", "spf13-vim", "vim" ] date: "2012-04-06" categories: - "Development" - "VIM" slug: "spf13-vim-3-0-release-and-new-website" --- Content of the file goes Here ### TOML Example +++ title = "spf13-vim 3.0 release and new website" description = "spf13-vim is a cross platform distribution of vim plugins and resources for Vim." tags = [ ".vimrc", "plugins", "spf13-vim", "vim" ] date = "2012-04-06" categories = [ "Development", "VIM" ] slug = "spf13-vim-3-0-release-and-new-website" +++ Content of the file goes Here ### JSON Example { "title": "spf13-vim 3.0 release and new website", "description": "spf13-vim is a cross platform distribution of vim plugins and resources for Vim.", "tags": [ ".vimrc", "plugins", "spf13-vim", "vim" ], "date": "2012-04-06", "categories": [ "Development", "VIM" ], "slug": "spf13-vim-3-0-release-and-new-website", } Content of the file goes Here ### Variables There are a few predefined variables that Hugo is aware of and utilizes. The user can also create any variable they want to. These will be placed into the `.Params` variable available to the templates. **Field names are case insensitive.** #### Required **title** The title for the content. <br> **description** The description for the content.<br> **date** The date the content will be sorted by.<br> **indexes** These will use the field name of the plural form of the index (see tags and categories above) #### Optional **redirect** Mark the post as a redirect post<br> **draft** If true the content will not be rendered unless `hugo` is called with -d<br> **type** The type of the content (will be derived from the directory automatically if unset).<br> **markup** (Experimental) Specify "rst" for reStructuredText (requires `rst2html`,) or "md" (default) for the Markdown.<br> **slug** The token to appear in the tail of the url.<br> *or*<br> **url** The full path to the content from the web root.<br> *If neither is present the filename will be used.*
docs/content/content/front-matter.md
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.00017829594435170293, 0.0001723368768580258, 0.0001658617111388594, 0.00017327131354250014, 0.000004088004061486572 ]
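The front matter document above distinguishes the three formats purely by the delimiter that opens the file: '---' for YAML, '+++' for TOML, and '{' for JSON. A rough sketch of that detection rule is shown below; it is illustrative only and not Hugo's actual parser.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// detectFrontMatter reports which front matter format a page appears to use,
// based only on the delimiters described above.
func detectFrontMatter(page string) string {
	sc := bufio.NewScanner(strings.NewReader(page))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue // skip leading blank lines
		}
		switch {
		case strings.HasPrefix(line, "---"):
			return "yaml"
		case strings.HasPrefix(line, "+++"):
			return "toml"
		case strings.HasPrefix(line, "{"):
			return "json"
		}
		return "none" // first non-blank line is already content
	}
	return "none"
}

func main() {
	fmt.Println(detectFrontMatter("+++\ntitle = \"Front Matter\"\n+++\ncontent")) // toml
}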
{ "id": 1, "code_window": [ "\ts := &Site{\n", "\t\tTarget: target,\n", "\t\tConfig: Config{BaseUrl: \"http://auth/bub/\"},\n", "\t\tSource: &inMemorySource{urlFakeSource},\n", "\t}\n", "\ts.initializeSiteInfo()\n", "\ts.prepTemplates()\n", "\tmust(s.addTemplate(\"blue/single.html\", TEMPLATE_WITH_URL))\n" ], "labels": [ "keep", "keep", "keep", "replace", "keep", "keep", "keep", "keep" ], "after_edit": [ "\t\tSource: &inMemorySource{sources},\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 253 }
--- title: The Little Redis Book cn --- \thispagestyle{empty} \changepage{}{}{}{-0.5cm}{}{2cm}{}{}{} ![The Little Redis Book cn, By Karl Seguin, Translate By Jason Lai](title.png)\ \clearpage \changepage{}{}{}{0.5cm}{}{-2cm}{}{}{} ## 关于此书 ### 许可证 《The Little Redis Book》是经由Attribution-NonCommercial 3.0 Unported license许可的,你不需要为此书付钱。 你可以自由地对此书进行复制,分发,修改或者展示等操作。当然,你必须知道且认可这本书的作者是Karl Seguin,译者是赖立维,而且不应该将此书用于商业用途。 关于这个**许可证**的*详细描述*在这里: <http://creativecommons.org/licenses/by-nc/3.0/legalcode> ### 关于作者 作者Karl Seguin是一名在多项技术领域浸淫多年的开发者。他是开源软件计划的活跃贡献者,同时也是一名技术作者以及业余演讲者。他写过若干关于Radis的文章以及一些工具。在他的一个面向业余游戏开发者的免费服务里,Redis为其中的评级和统计功能提供了支持:[mogade.com](http://mogade.com/)。 Karl之前还写了[《The Little MongoDB Book》](http://openmymind.net/2011/3/28/The-Little-MongoDB-Book/),这是一本免费且受好评,关于MongoDB的书。 他的博客是<http://openmymind.net>,你也可以关注他的Twitter帐号,via [@karlseguin](http://twitter.com/karlseguin)。 ### 关于译者 译者 赖立维 是一名长在天朝的普通程序员,对许多技术都有浓厚的兴趣,是开源软件的支持者,Emacs的轻度使用者。 虽然译者已经很认真地对待这次翻译,但是限于水平有限,肯定会有不少错漏,如果发现该书的翻译有什么需要修改,可以通过他的邮箱与他联系。他的邮箱是<[email protected]>。 ### 致谢 必须特别感谢[Perry Neal](https://twitter.com/perryneal)一直以来的指导,我的眼界、触觉以及激情都来源于你。你为我提供了无价的帮助,感谢你。 ### 最新版本 此书的最新有效资源在: <http://github.com/karlseguin/the-little-redis-book> 中文版是英文版的一个分支,最新的中文版本在: <https://github.com/JasonLai256/the-little-redis-book> \clearpage ## 简介 最近几年来,关于持久化和数据查询的相关技术,其需求已经增长到了让人惊讶的程度。可以断言,关系型数据库再也不是放之四海皆准。换一句话说,围绕数据的解决方案不可能再只有唯一一种。 对于我来说,在众多新出现的解决方案和工具里,最让人兴奋的,无疑是Redis。为什么?首先是因为其让人不可思议的容易学习,只需要简短的几个小时学习时间,就能对Redis有个大概的认识。还有,Redis在处理一组特定的问题集的同时能保持相当的通用性。更准确地说就是,Redis不会尝试去解决关于数据的所有事情。在你足够了解Redis后,事情就会变得越来越清晰,什么是可行的,什么是不应该由Redis来处理的。作为一名开发人员,如此的经验当是相当的美妙。 当你能仅使用Redis去构建一个完整系统时,我想大多数人将会发现,Redis能使得他们的许多数据方案变得更为通用,不论是一个传统的关系型数据库,一个面向文档的系统,或是其它更多的东西。这是一种用来实现某些特定特性的解决方法。就类似于一个索引引擎,你不会在Lucene上构建整个程序,但当你需要足够好的搜索,为什么不使用它呢?这对你和你的用户都有好处。当然,关于Redis和索引引擎之间相似性的讨论到此为止。 本书的目的是向读者传授掌握Redis所需要的基本知识。我们将会注重于学习Redis的5种数据结构,并研究各种数据建模方法。我们还会接触到一些主要的管理细节和调试技巧。 ## 入门 每个人的学习方式都不一样,有的人喜欢亲自实践学习,有的喜欢观看教学视频,还有的喜欢通过阅读来学习。对于Redis,没有什么比亲自实践学习来得效果更好的了。Redis的安装非常简单。而且通过随之安装的一个简单的命令解析程序,就能处理我们想做的一切事情。让我们先花几分钟的时间把Redis安装到我们的机器上。 ### Windows平台 Redis并没有官方支持Windows平台,但还是可供选择。你不会想在这里配置实际的生产环境,不过在我过往的开发经历里并没有感到有什么限制。 首先进入<https://github.com/dmajkic/redis/downloads>,然后下载最新的版本(应该会在列表的最上方)。 获取zip文件,然后根据你的系统架构,打开`64bit`或`32bit`文件夹。 ### *nix和MacOSX平台 对于*nix和MacOSX平台的用户,从源文件来安装是你的最佳选择。通过最新的版本号来选择,有效地址于<http://redis.io/download>。在编写此书的时候,最新的版本是2.4.6,我们可以运行下面的命令来安装该版本: wget http://redis.googlecode.com/files/redis-2.4.6.tar.gz tar xzf redis-2.4.6.tar.gz cd redis-2.4.6 make (当然,Redis同样可以通过套件管理程序来安装。例如,使用Homebrew的MaxOSX用户可以只键入`brew install redis`即可。) 如果你是通过源文件来安装,二进制可执行文件会被放置在`src`目录里。通过运行`cd src`可跳转到`src`目录。 ### 运行和连接Redis 如果一切都工作正常,那Redis的二进制文件应该已经可以曼妙地跳跃于你的指尖之下。Redis只有少量的可执行文件,我们将着重于Redis的服务器和命令行界面(一个类DOS的客户端)。首先,让我们来运行服务器。在Windows平台,双击`redis-server`,在*nix/MacOSX平台则运行`./redis-server`. 
如果你仔细看了启动信息,你会看到一个警告,指没能找到`redis.conf`文件。Redis将会采用内置的默认设置,这对于我们将要做的已经足够了。 然后,通过双击`redis-cli`(Windows平台)或者运行`./redis-cli`(*nix/MacOSX平台),启动Redis的控制台。控制台将会通过默认的端口(6379)来连接本地运行的服务器。 可以在命令行界面键入`info`命令来查看一切是不是都运行正常。你会很乐意看到这么一大组关键字-值(key-value)对的显示,这为我们查看服务器的状态提供了大量有效信息。 如果在上面的启动步骤里遇到什么问题,我建议你到[Redis的官方支持组](https://groups.google.com/forum/#!forum/redis-db)里获取帮助。 ## 驱动Redis 很快你就会发现,Redis的API就如一组定义明确的函数那般容易理解。Redis具有让人难以置信的简单性,其操作过程也同样如此。这意味着,无论你是使用命令行程序,或是使用你喜欢的语言来驱动,整体的感觉都不会相差多少。因此,相对于命令行程序,如果你更愿意通过一种编程语言去驱动Redis,你不会感觉到有任何适应的问题。如果真想如此,可以到Redis的[客户端推荐页面](http://redis.io/clients)下载适合的Redis载体。 \clearpage ## 第1章 - 基础知识 是什么使Redis显得这么特别?Redis具体能解决什么类型的问题?要实际应用Redis,开发者必须储备什么知识?在我们能回答这么一些问题之前,我们需要明白Redis到底是什么。 Redis通常被人们认为是一种持久化的存储器关键字-值型存储(in-memory persistent key-value store)。我认为这种对Redis的描述并不太准确。Redis的确是将所有的数据存放于存储器(更多是是按位存储),而且也确实通过将数据写入磁盘来实现持久化,但是Redis的实际意义比单纯的关键字-值型存储要来得深远。纠正脑海里的这种误解观点非常关键,否则你对于Redis之道以及其应用的洞察力就会变得越发狭义。 事实是,Redis引入了5种不同的数据结构,只有一个是典型的关键字-值型结构。理解Redis的关键就在于搞清楚这5种数据结构,其工作的原理都是如何,有什么关联方法以及你能怎样应用这些数据结构去构建模型。首先,让我们来弄明白这些数据结构的实际意义。 应用上面提及的数据结构概念到我们熟悉的关系型数据库里,我们可以认为其引入了一个单独的数据结构——表格。表格既复杂又灵活,基于表格的存储和管理,没有多少东西是你不能进行建模的。然而,这种通用性并不是没有缺点。具体来说就是,事情并不是总能达到假设中的简单或者快速。相对于这种普遍适用(one-size-fits-all)的结构体系,我们可以使用更为专门化的结构体系。当然,因此可能有些事情我们会完成不了(至少,达不到很好的程度)。但话说回来,这样做就能确定我们可以获得想象中的简单性和速度吗? 针对特定类型的问题使用特定的数据结构?我们不就是这样进行编程的吗?你不会使用一个散列表去存储每份数据,也不会使用一个标量变量去存储。对我来说,这正是Redis的做法。如果你需要处理标量、列表、散列或者集合,为什么不直接就用标量、列表、散列和集合去存储他们?为什么不是直接调用`exists(key)`去检测一个已存在的值,而是要调用其他比O(1)(常量时间查找,不会因为待处理元素的增长而变慢)慢的操作? ### 数据库(Databases) 与你熟悉的关系型数据库一致,Redis有着相同的数据库基本概念,即一个数据库包含一组数据。典型的数据库应用案例是,将一个程序的所有数据组织起来,使之与另一个程序的数据保持独立。 在Redis里,数据库简单的使用一个数字编号来进行辨认,默认数据库的数字编号是`0`。如果你想切换到一个不同的数据库,你可以使用`select`命令来实现。在命令行界面里键入`select 1`,Redis应该会回复一条`OK`的信息,然后命令行界面里的提示符会变成类似`redis 127.0.0.1:6379[1]>`这样。如果你想切换回默认数据库,只要在命令行界面键入`select 0`即可。 ### 命令、关键字和值(Commands, Keys and Values) Redis不仅仅是一种简单的关键字-值型存储,从其核心概念来看,Redis的5种数据结构中的每一个都至少有一个关键字和一个值。在转入其它关于Redis的有用信息之前,我们必须理解关键字和值的概念。 关键字(Keys)是用来标识数据块。我们将会很常跟关键字打交道,不过在现在,明白关键字就是类似于`users:leto`这样的表述就足够了。一般都能很好地理解到,这样关键字包含的信息是一个名为`leto`的用户。这个关键字里的冒号没有任何特殊含义,对于Redis而言,使用分隔符来组织关键字是很常见的方法。 值(Values)是关联于关键字的实际值,可以是任何东西。有时候你会存储字符串,有时候是整数,还有时候你会存储序列化对象(使用JSON、XML或其他格式)。在大多数情况下,Redis会把值看做是一个字节序列,而不会关注它们实质上是什么。要注意,不同的Redis载体处理序列化会有所不同(一些会让你自己决定)。因此,在这本书里,我们将仅讨论字符串、整数和JSON。 现在让我们活动一下手指吧。在命令行界面键入下面的命令: set users:leto "{name: leto, planet: dune, likes: [spice]}" 这就是Redis命令的基本构成。首先我们要有一个确定的命令,在上面的语句里就是`set`。然后就是相应的参数,`set`命令接受两个参数,包括要设置的关键字,以及相应要设置的值。很多的情况是,命令接受一个关键字(当这种情况出现,其经常是第一个参数)。你能想到如何去获取这个值吗?我想你会说(当然一时拿不准也没什么): get users:leto 关键字和值的是Redis的基本概念,而`get`和`set`命令是对此最简单的使用。你可以创建更多的用户,去尝试不同类型的关键字以及不同的值,看看一些不同的组合。 ### 查询(Querying) 随着学习的持续深入,两件事情将变得清晰起来。对于Redis而言,关键字就是一切,而值是没有任何意义。更通俗来看就是,Redis不允许你通过值来进行查询。回到上面的例子,我们就不能查询生活在`dune`行星上的用户。 对许多人来说,这会引起一些担忧。在我们生活的世界里,数据查询是如此的灵活和强大,而Redis的方式看起来是这么的原始和不高效。不要让这些扰乱你太久。要记住,Redis不是一种普遍使用(one-size-fits-all)的解决方案,确实存在这么一些事情是不应该由Redis来解决的(因为其查询的限制)。事实上,在考虑了这些情况后,你会找到新的方法去构建你的数据。 很快,我们就能看到更多实际的用例。很重要的一点是,我们要明白关于Redis的这些基本事实。这能帮助我们弄清楚为什么值可以是任何东西,因为Redis从来不需要去读取或理解它们。而且,这也可以帮助我们理清思路,然后去思考如何在这个新世界里建立模型。 ### 存储器和持久化(Memory and Persistence) 我们之前提及过,Redis是一种持久化的存储器内存储(in-memory persistent store)。对于持久化,默认情况下,Redis会根据已变更的关键字数量来进行判断,然后在磁盘里创建数据库的快照(snapshot)。你可以对此进行设置,如果X个关键字已变更,那么每隔Y秒存储数据库一次。默认情况下,如果1000个或更多的关键字已变更,Redis会每隔60秒存储数据库;而如果9个或更少的关键字已变更,Redis会每隔15分钟存储数据库。 除了创建磁盘快照外,Redis可以在附加模式下运行。任何时候,如果有一个关键字变更,一个单一附加(append-only)的文件会在磁盘里进行更新。在一些情况里,虽然硬件或软件可能发生错误,但用那60秒有效数据存储去换取更好性能是可以接受的。而在另一些情况里,这种损失就难以让人接受,Redis为你提供了选择。在第5章里,我们将会看到第三种选择,其将持久化任务减荷到一个从属数据库里。 
至于存储器,Redis会将所有数据都保留在存储器中。显而易见,运行Redis具有不低的成本:因为RAM仍然是最昂贵的服务器硬件部件。 我很清楚有一些开发者对即使是一点点的数据空间都是那么的敏感。一本《威廉·莎士比亚全集》需要近5.5MB的存储空间。对于缩放的需求,其它的解决方案趋向于IO-bound或者CPU-bound。这些限制(RAM或者IO)将会需要你去理解更多机器实际依赖的数据类型,以及应该如何去进行存储和查询。除非你是存储大容量的多媒体文件到Redis中,否则存储器内存储应该不会是一个问题。如果这对于一个程序是个问题,你就很可能不会用IO-bound的解决方案。 Redis有虚拟存储器的支持。然而,这个功能已经被认为是失败的了(通过Redis的开发者),而且它的使用已经被废弃了。 (从另一个角度来看,一本5.5MB的《威廉·莎士比亚全集》可以通过压缩减小到近2MB。当然,Redis不会自动对值进行压缩,但是因为其将所有值都看作是字节,没有什么限制让你不能对数据进行压缩/解压,通过牺牲处理时间来换取存储空间。) ### 整体来看(Putting It Together) 我们已经接触了好几个高层次的主题。在继续深入Redis之前,我想做的最后一件事情是将这些主题整合起来。这些主题包括,查询的限制,数据结构以及Redis在存储器内存储数据的方法。 当你将这3个主题整合起来,你最终会得出一个绝妙的结论:速度。一些人可能会想,当然Redis会很快速,要知道所以的东西都在存储器里。但这仅仅是其中的一部分,让Redis闪耀的真正原因是其不同于其它解决方案的特殊数据结构。 能有多快速?这依赖于很多东西,包括你正在使用着哪个命令,数据的类型等等。但Redis的性能测试是趋向于数万或数十万次操作**每秒**。你可以通过运行`redis-benchmark`(就在`redis-server`和`redis-cli`的同一个文件夹里)来进行测试。 我曾经试过将一组使用传统模型的代码转向使用Redis。在传统模型里,运行一个我写的载入测试,需要超过5分钟的时间来完成。而在Redis里,只需要150毫秒就完成了。你不会总能得到这么好的收获,但希望这能让你对我们所谈的东西有更清晰的理解。 理解Redis的这个特性很重要,因为这将影响到你如何去与Redis进行交互。拥有SQL背景的程序员通常会致力于让数据库的数据往返次数减至最小。这对于任何系统都是个好建议,包括Redis。然而,考虑到我们是在处理比较简单的数据结构,有时候我们还是需要与Redis服务器频繁交互,以达到我们的目的。刚开始的时候,可能会对这种数据访问模式感到不太自然。实际上,相对于我们通过Redis获得的高性能而言,这仅仅是微不足道的损失。 ### 小结 虽然我们只接触和摆弄了Redis的冰山一角,但我们讨论的主题已然覆盖了很大范围内的东西。如果觉得有些事情还是不太清楚(例如查询),不用为此而担心,在下一章我们将会继续深入探讨,希望你的问题都能得到解答。 这一章的要点包括: * 关键字(Keys)是用于标识一段数据的一个字符串 * 值(Values)是一段任意的字节序列,Redis不会关注它们实质上是什么 * Redis展示了(也实现了)5种专门的数据结构 * 上面的几点使得Redis快速而且容易使用,但要知道Redis并不适用于所有的应用场景 \clearpage ## 第2章 - 数据结构 现在开始将探究Redis的5种数据结构,我们会解释每种数据结构都是什么,包含了什么有效的方法(Method),以及你能用这些数据结构处理哪些类型的特性和数据。 目前为止,我们所知道的Redis构成仅包括命令、关键字和值,还没有接触到关于数据结构的具体概念。当我们使用`set`命令时,Redis是怎么知道我们是在使用哪个数据结构?其解决方法是,每个命令都相对应于一种特定的数据结构。例如,当你使用`set`命令,你就是将值存储到一个字符串数据结构里。而当你使用`hset`命令,你就是将值存储到一个散列数据结构里。考虑到Redis的关键字集很小,这样的机制具有相当的可管理性。 **[Redis的网站](http://redis.io/commands)里有着非常优秀的参考文档,没有任何理由去重造轮子。但为了搞清楚这些数据结构的作用,我们将会覆盖那些必须知道的重要命令。** 没有什么事情比高兴的玩和试验有趣的东西来得更重要的了。在任何时候,你都能通过键入`flushdb`命令将你数据库里的所有值清除掉,因此,不要再那么害羞了,去尝试做些疯狂的事情吧! ### 字符串(Strings) 在Redis里,字符串是最基本的数据结构。当你在思索着关键字-值对时,你就是在思索着字符串数据结构。不要被名字给搞混了,如之前说过的,你的值可以是任何东西。我更喜欢将他们称作“标量”(Scalars),但也许只有我才这样想。 我们已经看到了一个常见的字符串使用案例,即通过关键字存储对象的实例。有时候,你会频繁地用到这类操作: set users:leto "{name: leto, planet: dune, likes: [spice]}" 除了这些外,Redis还有一些常用的操作。例如,`strlen <key>`能用来获取一个关键字对应值的长度;`getrange <key> <start> <end>`将返回指定范围内的关键字对应值;`append <key> <value>`会将value附加到已存在的关键字对应值中(如果该关键字并不存在,则会创建一个新的关键字-值对)。不要犹豫,去试试看这些命令吧。下面是我得到的: > strlen users:leto (integer) 42 > getrange users:leto 27 40 "likes: [spice]" > append users:leto " OVER 9000!!" 
(integer) 54 现在你可能会想,这很好,但似乎没有什么意义。你不能有效地提取出一段范围内的JSON文件,或者为其附加一些值。你是对的,这里的经验是,一些命令,尤其是关于字符串数据结构的,只有在给定了明确的数据类型后,才会有实际意义。 之前我们知道了,Redis不会去关注你的值是什么东西。通常情况下,这没有错。然而,一些字符串命令是专门为一些类型或值的结构而设计的。作为一个有些含糊的用例,我们可以看到,对于一些自定义的空间效率很高的(space-efficient)串行化对象,`append`和`getrange`命令将会很有用。对于一个更为具体的用例,我们可以再看一下`incr`、`incrby`、`decr`和`decrby`命令。这些命令会增长或者缩减一个字符串数据结构的值: > incr stats:page:about (integer) 1 > incr stats:page:about (integer) 2 > incrby ratings:video:12333 5 (integer) 5 > incrby ratings:video:12333 3 (integer) 8 由此你可以想象到,Redis的字符串数据结构能很好地用于分析用途。你还可以去尝试增长`users:leto`(一个不是整数的值),然后看看会发生什么(应该会得到一个错误)。 更为进阶的用例是`setbit`和`getbit`命令。“今天我们有多少个独立用户访问”是个在Web应用里常见的问题,有一篇[精彩的博文](http://blog.getspool.com/2011/11/29/fast-easy-realtime-metrics-using-redis-bitmaps/),在里面可以看到Spool是如何使用这两个命令有效地解决此问题。对于1.28亿个用户,一部笔记本电脑在不到50毫秒的时间里就给出了答复,而且只用了16MB的存储空间。 最重要的事情不是在于你是否明白位图(Bitmaps)的工作原理,或者Spool是如何去使用这些命令,而是应该要清楚Redis的字符串数据结构比你当初所想的要有用许多。然而,最常见的应用案例还是上面我们给出的:存储对象(简单或复杂)和计数。同时,由于通过关键字来获取一个值是如此之快,字符串数据结构很常被用来缓存数据。 ### 散列(Hashes) 我们已经知道把Redis称为一种关键字-值型存储是不太准确的,散列数据结构是一个很好的例证。你会看到,在很多方面里,散列数据结构很像字符串数据结构。两者显著的区别在于,散列数据结构提供了一个额外的间接层:一个域(Field)。因此,散列数据结构中的`set`和`get`是: hset users:goku powerlevel 9000 hget users:goku powerlevel 相关的操作还包括在同一时间设置多个域、同一时间获取多个域、获取所有的域和值、列出所有的域或者删除指定的一个域: hmset users:goku race saiyan age 737 hmget users:goku race powerlevel hgetall users:goku hkeys users:goku hdel users:goku age 如你所见,散列数据结构比普通的字符串数据结构具有更多的可操作性。我们可以使用一个散列数据结构去获得更精确的描述,是存储一个用户,而不是一个序列化对象。从而得到的好处是能够提取、更新和删除具体的数据片段,而不必去获取或写入整个值。 对于散列数据结构,可以从一个经过明确定义的对象的角度来考虑,例如一个用户,关键之处在于要理解他们是如何工作的。从性能上的原因来看,这是正确的,更具粒度化的控制可能会相当有用。在下一章我们将会看到,如何用散列数据结构去组织你的数据,使查询变得更为实效。在我看来,这是散列真正耀眼的地方。 ### 列表(Lists) 对于一个给定的关键字,列表数据结构让你可以存储和处理一组值。你可以添加一个值到列表里、获取列表的第一个值或最后一个值以及用给定的索引来处理值。列表数据结构维护了值的顺序,提供了基于索引的高效操作。为了跟踪在网站里注册的最新用户,我们可以维护一个`newusers`的列表: lpush newusers goku ltrim newusers 0 50 **(译注:`ltrim`命令的具体构成是`LTRIM Key start stop`。要理解`ltrim`命令,首先要明白Key所存储的值是一个列表,理论上列表可以存放任意个值。对于指定的列表,根据所提供的两个范围参数start和stop,`ltrim`命令会将指定范围外的值都删除掉,只留下范围内的值。)** 首先,我们将一个新用户推入到列表的前端,然后对列表进行调整,使得该列表只包含50个最近被推入的用户。这是一种常见的模式。`ltrim`是一个具有O(N)时间复杂度的操作,N是被删除的值的数量。从上面的例子来看,我们总是在插入了一个用户后再进行列表调整,实际上,其将具有O(1)的时间复杂度(因为N将永远等于1)的常数性能。 这是我们第一次看到一个关键字的对应值索引另一个值。如果我们想要获取最近的10个用户的详细资料,我们可以运行下面的组合操作: keys = redis.lrange('newusers', 0, 10) redis.mget(*keys.map {|u| "users:#{u}"}) 我们之前谈论过关于多次往返数据的模式,上面的两行Ruby代码为我们进行了很好的演示。 当然,对于存储和索引关键字的功能,并不是只有列表数据结构这种方式。值可以是任意的东西,你可以使用列表数据结构去存储日志,也可以用来跟踪用户浏览网站时的路径。如果你过往曾构建过游戏,你可能会使用列表数据结构去跟踪用户的排队活动。 ### 集合 集合数据结构常常被用来存储只能唯一存在的值,并提供了许多的基于集合的操作,例如并集。集合数据结构没有对值进行排序,但是其提供了高效的基于值的操作。使用集合数据结构的典型用例是朋友名单的实现: sadd friends:leto ghanima paul chani jessica sadd friends:duncan paul jessica alia 不管一个用户有多少个朋友,我们都能高效地(O(1)时间复杂度)识别出用户X是不是用户Y的朋友: sismember friends:leto jessica sismember friends:leto vladimir 而且,我们可以查看两个或更多的人是不是有共同的朋友: sinter friends:leto friends:duncan 甚至可以在一个新的关键字里存储结果: sinterstore friends:leto_duncan friends:leto friends:duncan 有时候需要对值的属性进行标记和跟踪处理,但不能通过简单的复制操作完成,集合数据结构是解决此类问题的最好方法之一。当然,对于那些需要运用集合操作的地方(例如交集和并集),集合数据结构就是最好的选择。 ### 分类集合(Sorted Sets) 最后也是最强大的数据结构是分类集合数据结构。如果说散列数据结构类似于字符串数据结构,主要区分是域(field)的概念;那么分类集合数据结构就类似于集合数据结构,主要区分是标记(score)的概念。标记提供了排序(sorting)和秩划分(ranking)的功能。如果我们想要一个秩分类的朋友名单,可以这样做: zadd friends:duncan 70 ghanima 95 paul 95 chani 75 jessica 1 vladimir 对于`duncan`的朋友,要怎样计算出标记(score)为90或更高的人数? zcount friends:duncan 90 100 如何获取`chani`在名单里的秩(rank)? 
zrevrank friends:duncan chani **(译注:`zrank`命令的具体构成是`ZRANK Key menber`,要知道Key存储的Sorted Set默认是根据Score对各个menber进行升序的排列,该命令就是用来获取menber在该排列里的次序,这就是所谓的秩。)** 我们使用了`zrevrank`命令而不是`zrank`命令,这是因为Redis的默认排序是从低到高,但是在这个例子里我们的秩划分是从高到低。对于分类集合数据结构,最常见的应用案例是用来实现排行榜系统。事实上,对于一些基于整数排序,且能以标记(score)来进行有效操作的东西,使用分类集合数据结构来处理应该都是不错的选择。 ### 小结 对于Redis的5种数据结构,我们进行了高层次的概述。一件有趣的事情是,相对于最初构建时的想法,你经常能用Redis创造出一些更具实效的事情。对于字符串数据结构和分类集合数据结构的使用,很有可能存在一些构建方法是还没有人想到的。当你理解了那些常用的应用案例后,你将发现Redis对于许多类型的问题,都是很理想的选择。还有,不要因为Redis展示了5种数据结构和相应的各种方法,就认为你必须要把所有的东西都用上。只使用一些命令去构建一个特性是很常见的。 \clearpage ## 第3章 - 使用数据结构 在上一章里,我们谈论了Redis的5种数据结构,对于一些可能的用途也给出了用例。现在是时候来看看一些更高级,但依然很常见的主题和设计模式。 ### 大O表示法(Big O Notation) 在本书中,我们之前就已经看到过大O表示法,包括O(1)和O(N)的表示。大O表示法的惯常用途是,描述一些用于处理一定数量元素的行为的综合表现。在Redis里,对于一个要处理一定数量元素的命令,大O表示法让我们能了解该命令的大概运行速度。 在Redis的文档里,每一个命令的时间复杂度都用大O表示法进行了描述,还能知道各命令的具体性能会受什么因素影响。让我们来看看一些用例。 常数时间复杂度O(1)被认为是最快速的,无论我们是在处理5个元素还是5百万个元素,最终都能得到相同的性能。对于`sismember`命令,其作用是告诉我们一个值是否属于一个集合,时间复杂度为O(1)。`sismember`命令很强大,很大部分的原因是其高效的性能特征。许多Redis命令都具有O(1)的时间复杂度。 对数时间复杂度O(log(N))被认为是第二快速的,其通过使需扫描的区间不断皱缩来快速完成处理。使用这种“分而治之”的方式,大量的元素能在几个迭代过程里被快速分解完整。`zadd`命令的时间复杂度就是O(log(N)),其中N是在分类集合中的元素数量。 再下来就是线性时间复杂度O(N),在一个表格的非索引列里进行查找就需要O(N)次操作。`ltrim`命令具有O(N)的时间复杂度,但是,在`ltrim`命令里,N不是列表所拥有的元素数量,而是被删除的元素数量。从一个具有百万元素的列表里用`ltrim`命令删除1个元素,要比从一个具有一千个元素的列表里用`ltrim`命令删除10个元素来的快速(实际上,两者很可能会是一样快,因为两个时间都非常的小)。 根据给定的最小和最大的值的标记,`zremrangebyscore`命令会在一个分类集合里进行删除元素操作,其时间复杂度是O(log(N)+M)。这看起来似乎有点儿杂乱,通过阅读文档可以知道,这里的N指的是在分类集合里的总元素数量,而M则是被删除的元素数量。可以看出,对于性能而言,被删除的元素数量很可能会比分类集合里的总元素数量更为重要。 **(译注:`zremrangebyscore`命令的具体构成是`ZREMRANGEBYSCORE Key max mix`。)** 对于`sort`命令,其时间复杂度为O(N+M*log(M)),我们将会在下一章谈论更多的相关细节。从`sort`命令的性能特征来看,可以说这是Redis里最复杂的一个命令。 还存在其他的时间复杂度描述,包括O(N^2)和O(C^N)。随着N的增大,其性能将急速下降。在Redis里,没有任何一个命令具有这些类型的时间复杂度。 值得指出的一点是,在Redis里,当我们发现一些操作具有O(N)的时间复杂度时,我们可能可以找到更为好的方法去处理。 **(译注:对于Big O Notation,相信大家都非常的熟悉,虽然原文仅仅是对该表示法进行简单的介绍,但限于个人的算法知识和文笔水平实在有限,此小节的翻译让我头痛颇久,最终成果也确实难以让人满意,望见谅。)** ### 仿多关键字查询(Pseudo Multi Key Queries) 时常,你会想通过不同的关键字去查询相同的值。例如,你会想通过电子邮件(当用户开始登录时)去获取用户的具体信息,或者通过用户id(在用户登录后)去获取。有一种很不实效的解决方法,其将用户对象分别放置到两个字符串值里去: set users:[email protected] "{id: 9001, email: '[email protected]', ...}" set users:9001 "{id: 9001, email: '[email protected]', ...}" 这种方法很糟糕,如此不但会产生两倍数量的内存,而且这将会成为数据管理的恶梦。 如果Redis允许你将一个关键字链接到另一个的话,可能情况会好很多,可惜Redis并没有提供这样的功能(而且很可能永远都不会提供)。Redis发展到现在,其开发的首要目的是要保持代码和API的整洁简单,关键字链接功能的内部实现并不符合这个前提(对于关键字,我们还有很多相关方法没有谈论到)。其实,Redis已经提供了解决的方法:散列。 使用散列数据结构,我们可以摆脱重复的缠绕: set users:9001 "{id: 9001, email: [email protected], ...}" hset users:lookup:email [email protected] 9001 我们所做的是,使用域来作为一个二级索引,然后去引用单个用户对象。要通过id来获取用户信息,我们可以使用一个普通的`get`命令: get users:9001 而如果想通过电子邮箱来获取用户信息,我们可以使用`hget`命令再配合使用`get`命令(Ruby代码): id = redis.hget('users:lookup:email', '[email protected]') user = redis.get("users:#{id}") 你很可能将会经常使用这类用法。在我看来,这就是散列真正耀眼的地方。在你了解这类用法之前,这可能不是一个明显的用例。 ### 引用和索引(References and Indexes) 我们已经看过几个关于值引用的用例,包括介绍列表数据结构时的用例,以及在上面使用散列数据结构来使查询更灵活一些。进行归纳后会发现,对于那些值与值间的索引和引用,我们都必须手动的去管理。诚实来讲,这确实会让人有点沮丧,尤其是当你想到那些引用相关的操作,如管理、更新和删除等,都必须手动的进行时。在Redis里,这个问题还没有很好的解决方法。 我们已经看到,集合数据结构很常被用来实现这类索引: sadd friends:leto ghanima paul chani jessica 这个集合里的每一个成员都是一个Redis字符串数据结构的引用,而每一个引用的值则包含着用户对象的具体信息。那么如果`chani`改变了她的名字,或者删除了她的帐号,应该如何处理?从整个朋友圈的关系结构来看可能会更好理解,我们知道,`chani`也有她的朋友: sadd friends_of:chani leto paul 如果你有什么待处理情况像上面那样,那在维护成本之外,还会有对于额外索引值的处理和存储空间的成本。这可能会令你感到有点退缩。在下一小节里,我们将会谈论减少使用额外数据交互的性能成本的一些方法(在第1章我们粗略地讨论了下)。 如果你确实在担忧着这些情况,其实,关系型数据库也有同样的开销。索引需要一定的存储空间,必须通过扫描或查找,然后才能找到相应的记录。其开销也是存在的,当然他们对此做了很多的优化工作,使之变得更为有效。 再次说明,需要在Redis里手动地管理引用确实是颇为棘手。但是,对于你关心的那些问题,包括性能或存储空间等,应该在经过测试后,才会有真正的理解。我想你会发现这不会是一个大问题。 ### 
数据交互和流水线(Round Trips and Pipelining) 我们已经提到过,与服务器频繁交互是Redis的一种常见模式。这类情况可能很常出现,为了使我们能获益更多,值得仔细去看看我们能利用哪些特性。 许多命令能接受一个或更多的参数,也有一种关联命令(sister-command)可以接受多个参数。例如早前我们看到过`mget`命令,接受多个关键字,然后返回值: keys = redis.lrange('newusers', 0, 10) redis.mget(*keys.map {|u| "users:#{u}"}) 或者是`sadd`命令,能添加一个或多个成员到集合里: sadd friends:vladimir piter sadd friends:paul jessica leto "leto II" chani Redis还支持流水线功能。通常情况下,当一个客户端发送请求到Redis后,在发送下一个请求之前必须等待Redis的答复。使用流水线功能,你可以发送多个请求,而不需要等待Redis响应。这不但减少了网络开销,还能获得性能上的显著提高。 值得一提的是,Redis会使用存储器去排列命令,因此批量执行命令是一个好主意。至于具体要多大的批量,将取决于你要使用什么命令(更明确来说,该参数有多大)。另一方面来看,如果你要执行的命令需要差不多50个字符的关键字,你大概可以对此进行数千或数万的批量操作。 对于不同的Redis载体,在流水线里运行命令的方式会有所差异。在Ruby里,你传递一个代码块到`pipelined`方法: redis.pipelined do 9001.times do redis.incr('powerlevel') end end 正如你可能猜想到的,流水线功能可以实际地加速一连串命令的处理。 ### 事务(Transactions) 每一个Redis命令都具有原子性,包括那些一次处理多项事情的命令。此外,对于使用多个命令,Redis支持事务功能。 你可能不知道,但Redis实际上是单线程运行的,这就是为什么每一个Redis命令都能够保证具有原子性。当一个命令在执行时,没有其他命令会运行(我们会在往后的章节里简略谈论一下Scaling)。在你考虑到一些命令去做多项事情时,这会特别的有用。例如: `incr`命令实际上就是一个`get`命令然后紧随一个`set`命令。 `getset`命令设置一个新的值然后返回原始值。 `setnx`命令首先测试关键字是否存在,只有当关键字不存在时才设置值 虽然这些都很有用,但在实际开发时,往往会需要运行具有原子性的一组命令。若要这样做,首先要执行`multi`命令,紧随其后的是所有你想要执行的命令(作为事务的一部分),最后执行`exec`命令去实际执行命令,或者使用`discard`命令放弃执行命令。Redis的事务功能保证了什么? * 事务中的命令将会按顺序地被执行 * 事务中的命令将会如单个原子操作般被执行(没有其它的客户端命令会在中途被执行) * 事务中的命令要么全部被执行,要么不会执行 你可以(也应该)在命令行界面对事务功能进行一下测试。还有一点要注意到,没有什么理由不能结合流水线功能和事务功能。 multi hincrby groups:1percent balance -9000000000 hincrby groups:99percent balance 9000000000 exec 最后,Redis能让你指定一个关键字(或多个关键字),当关键字有改变时,可以查看或者有条件地应用一个事务。这是用于当你需要获取值,且待运行的命令基于那些值时,所有都在一个事务里。对于上面展示的代码,我们不能去实现自己的`incr`命令,因为一旦`exec`命令被调用,他们会全部被执行在一块。我们不能这么做: redis.multi() current = redis.get('powerlevel') redis.set('powerlevel', current + 1) redis.exec() **(译注:虽然Redis是单线程运行的,但是我们可以同时运行多个Redis客户端进程,常见的并发问题还是会出现。像上面的代码,在`get`运行之后,`set`运行之前,`powerlevel`的值可能会被另一个Redis客户端给改变,从而造成错误。)** 这些不是Redis的事务功能的工作。但是,如果我们增加一个`watch`到`powerlevel`,我们可以这样做: redis.watch('powerlevel') current = redis.get('powerlevel') redis.multi() redis.set('powerlevel', current + 1) redis.exec() 在我们调用`watch`后,如果另一个客户端改变了`powerlevel`的值,我们的事务将会运行失败。如果没有客户端改变`powerlevel`的值,那么事务会继续工作。我们可以在一个循环里运行这些代码,直到其能正常工作。 ### 关键字反模式(Keys Anti-Pattern) 在下一章中,我们将会谈论那些没有确切关联到数据结构的命令,其中的一些是管理或调试工具。然而有一个命令我想特别地在这里进行谈论:`keys`命令。这个命令需要一个模式,然后查找所有匹配的关键字。这个命令看起来很适合一些任务,但这不应该用在实际的产品代码里。为什么?因为这个命令通过线性扫描所有的关键字来进行匹配。或者,简单地说,这个命令太慢了。 人们会如此去使用这个命令?一般会用来构建一个本地的Bug追踪服务。每一个帐号都有一个`id`,你可能会通过一个看起来像`bug:account_id:bug_id`的关键字,把每一个Bug存储到一个字符串数据结构值中去。如果你在任何时候需要查询一个帐号的Bug(显示它们,或者当用户删除了帐号时删除掉这些Bugs),你可能会尝试去使用`keys`命令: keys bug:1233:* 更好的解决方法应该使用一个散列数据结构,就像我们可以使用散列数据结构来提供一种方法去展示二级索引,因此我们可以使用域来组织数据: hset bugs:1233 1 "{id:1, account: 1233, subject: '...'}" hset bugs:1233 2 "{id:2, account: 1233, subject: '...'}" 从一个帐号里获取所有的Bug标识,可以简单地调用`hkeys bugs:1233`。去删除一个指定的Bug,可以调用`hdel bugs:1233 2`。如果要删除了一个帐号,可以通过`del bugs:1233`把关键字删除掉。 ### 小结 结合这一章以及前一章,希望能让你得到一些洞察力,了解如何使用Redis去支持(Power)实际项目。还有其他的模式可以让你去构建各种类型的东西,但真正的关键是要理解基本的数据结构。你将能领悟到,这些数据结构是如何能够实现你最初视角之外的东西。 \clearpage ## 第4章 超越数据结构 5种数据结构组成了Redis的基础,其他没有关联特定数据结构的命令也有很多。我们已经看过一些这样的命令:`info`, `select`, `flushdb`, `multi`, `exec`, `discard`, `watch`和`keys `。这一章将看看其他的一些重要命令。 ### 使用期限(Expiration) Redis允许你标记一个关键字的使用期限。你可以给予一个Unix时间戳形式(自1970年1月1日起)的绝对时间,或者一个基于秒的存活时间。这是一个基于关键字的命令,因此其不在乎关键字表示的是哪种类型的数据结构。 expire pages:about 30 expireat pages:about 1356933600 第一个命令将会在30秒后删除掉关键字(包括其关联的值)。第二个命令则会在2012年12月31日上午12点删除掉关键字。 这让Redis能成为一个理想的缓冲引擎。通过`ttl`命令,你可以知道一个关键字还能够存活多久。而通过`persist`命令,你可以把一个关键字的使用期限删除掉。 ttl pages:about persist pages:about 
最后,有个特殊的字符串命令,`setex`命令让你可以在一个单独的原子命令里设置一个字符串值,同时里指定一个生存期(这比任何事情都要方便)。 setex pages:about 30 '<h1>about us</h1>....' ### 发布和订阅(Publication and Subscriptions) Redis的列表数据结构有`blpop`和`brpop`命令,能从列表里返回且删除第一个(或最后一个)元素,或者被堵塞,直到有一个元素可供操作。这可以用来实现一个简单的队列。 **(译注:对于`blpop`和`brpop`命令,如果列表里没有关键字可供操作,连接将被堵塞,直到有另外的Redis客户端使用`lpush`或`rpush`命令推入关键字为止。)** 此外,Redis对于消息发布和频道订阅有着一流的支持。你可以打开第二个`redis-cli`窗口,去尝试一下这些功能。在第一个窗口里订阅一个频道(我们会称它为`warnings`): subscribe warnings 其将会答复你订阅的信息。现在,在另一个窗口,发布一条消息到`warnings`频道: publish warnings "it's over 9000!" 如果你回到第一个窗口,你应该已经接收到`warnings`频道发来的消息。 你可以订阅多个频道(`subscribe channel1 channel2 ...`),订阅一组基于模式的频道(`psubscribe warnings:*`),以及使用`unsubscribe`和`punsubscribe`命令停止监听一个或多个频道,或一个频道模式。 最后,可以注意到`publish`命令的返回值是1,这指出了接收到消息的客户端数量。 ### 监控和延迟日志(Monitor and Slow Log) `monitor`命令可以让你查看Redis正在做什么。这是一个优秀的调试工具,能让你了解你的程序如何与Redis进行交互。在两个`redis-cli`窗口中选一个(如果其中一个还处于订阅状态,你可以使用`unsubscribe`命令退订,或者直接关掉窗口再重新打开一个新窗口)键入`monitor`命令。在另一个窗口,执行任何其他类型的命令(例如`get`或`set`命令)。在第一个窗口里,你应该可以看到这些命令,包括他们的参数。 在实际生产环境里,你应该谨慎运行`monitor`命令,这真的仅仅就是一个很有用的调试和开发工具。除此之外,没有更多要说的了。 随同`monitor`命令一起,Redis拥有一个`slowlog`命令,这是一个优秀的性能剖析工具。其会记录执行时间超过一定数量**微秒**的命令。在下一章节,我们会简略地涉及如何配置Redis,现在你可以按下面的输入配置Redis去记录所有的命令: config set slowlog-log-slower-than 0 然后,执行一些命令。最后,你可以检索到所有日志,或者检索最近的那些日志: slowlog get slowlog get 10 通过键入`slowlog len`,你可以获取延迟日志里的日志数量。 对于每个被你键入的命令,你应该查看4个参数: * 一个自动递增的id * 一个Unix时间戳,表示命令开始运行的时间 * 一个微妙级的时间,显示命令运行的总时间 * 该命令以及所带参数 延迟日志保存在存储器中,因此在生产环境中运行(即使有一个低阀值)也应该不是一个问题。默认情况下,它将会追踪最近的1024个日志。 ### 排序(Sort) `sort`命令是Redis最强大的命令之一。它让你可以在一个列表、集合或者分类集合里对值进行排序(分类集合是通过标记来进行排序,而不是集合里的成员)。下面是一个`sort`命令的简单用例: rpush users:leto:guesses 5 9 10 2 4 10 19 2 sort users:leto:guesses 这将返回进行升序排序后的值。这里有一个更高级的例子: sadd friends:ghanima leto paul chani jessica alia duncan sort friends:ghanima limit 0 3 desc alpha 上面的命令向我们展示了,如何对已排序的记录进行分页(通过`limit`),如何返回降序排序的结果(通过`desc`),以及如何用字典序排序代替数值序排序(通过`alpha`)。 `sort`命令的真正力量是其基于引用对象来进行排序的能力。早先的时候,我们说明了列表、集合和分类集合很常被用于引用其他的Redis对象,`sort`命令能够解引用这些关系,而且通过潜在值来进行排序。例如,假设我们有一个Bug追踪器能让用户看到各类已存在问题。我们可能使用一个集合数据结构去追踪正在被监视的问题: sadd watch:leto 12339 1382 338 9338 你可能会有强烈的感觉,想要通过id来排序这些问题(默认的排序就是这样的),但是,我们更可能是通过问题的严重性来对这些问题进行排序。为此,我们要告诉Redis将使用什么模式来进行排序。首先,为了可以看到一个有意义的结果,让我们添加多一点数据: set severity:12339 3 set severity:1382 2 set severity:338 5 set severity:9338 4 要通过问题的严重性来降序排序这些Bug,你可以这样做: sort watch:leto by severity:* desc Redis将会用存储在列表(集合或分类集合)中的值去替代模式中的`*`(通过`by`)。这会创建出关键字名字,Redis将通过查询其实际值来排序。 在Redis里,虽然你可以有成千上万个关键字,类似上面展示的关系还是会引起一些混乱。幸好,`sort`命令也可以工作在散列数据结构及其相关域里。相对于拥有大量的高层次关键字,你可以利用散列: hset bug:12339 severity 3 hset bug:12339 priority 1 hset bug:12339 details "{id: 12339, ....}" hset bug:1382 severity 2 hset bug:1382 priority 2 hset bug:1382 details "{id: 1382, ....}" hset bug:338 severity 5 hset bug:338 priority 3 hset bug:338 details "{id: 338, ....}" hset bug:9338 severity 4 hset bug:9338 priority 2 hset bug:9338 details "{id: 9338, ....}" 所有的事情不仅变得更为容易管理,而且我们能通过`severity`或`priority`来进行排序,还可以告诉`sort`命令具体要检索出哪一个域的数据: sort watch:leto by bug:*->priority get bug:*->details 相同的值替代出现了,但Redis还能识别`->`符号,用它来查看散列中指定的域。里面还包括了`get`参数,这里也会进行值替代和域查看,从而检索出Bug的细节(details域的数据)。 对于太大的集合,`sort`命令的执行可能会变得很慢。好消息是,`sort`命令的输出可以被存储起来: sort watch:leto by bug:*->priority get bug:*->details store watch_by_priority:leto 使用我们已经看过的`expiration`命令,再结合`sort`命令的`store`能力,这是一个美妙的组合。 ### 小结 这一章主要关注那些非特定数据结构关联的命令。和其他事情一样,它们的使用依情况而定。构建一个程序或特性时,可能不会用到使用期限、发布和订阅或者排序等功能。但知道这些功能的存在是很好的。而且,我们也只接触到了一些命令。还有更多的命令,当你消化理解完这本书后,非常值得去浏览一下[完整的命令列表](http://redis.io/commands)。 \clearpage ## 第5章 - 管理 
在最后一章里,我们将集中谈论Redis运行中的一些管理方面内容。这是一个不完整的Redis管理指南,我们将会回答一些基本的问题,初接触Redis的新用户可能会很感兴趣。 ### 配置(Configuration) 当你第一次运行Redis的服务器,它会向你显示一个警告,指`redis.conf`文件没有被找到。这个文件可以被用来配置Redis的各个方面。一个充分定义(well-documented)的`redis.conf`文件对各个版本的Redis都有效。范例文件包含了默认的配置选项,因此,对于想要了解设置在干什么,或默认设置是什么,都会很有用。你可以在<https://github.com/antirez/redis/raw/2.4.6/redis.conf>找到这个文件。 **这个配置文件针对的是Redis 2.4.6,你应该用你的版本号替代上面URL里的"2.4.6"。运行`info`命令,其显示的第一个值就是Redis的版本号。** 因为这个文件已经是充分定义(well-documented),我们就不去再进行设置了。 除了通过`redis.conf`文件来配置Redis,`config set`命令可以用来对个别值进行设置。实际上,在将`slowlog-log-slower-than`设置为0时,我们就已经使用过这个命令了。 还有一个`config get`命令能显示一个设置值。这个命令支持模式匹配,因此如果我们想要显示关联于日志(logging)的所有设置,我们可以这样做: config get *log* ### 验证(Authentication) 通过设置`requirepass`(使用`config set`命令或`redis.conf`文件),可以让Redis需要一个密码验证。当`requirepass`被设置了一个值(就是待用的密码),客户端将需要执行一个`auth password`命令。 一旦一个客户端通过了验证,就可以在任意数据库里执行任何一条命令,包括`flushall`命令,这将会清除掉每一个数据库里的所有关键字。通过配置,你可以重命名一些重要命令为混乱的字符串,从而获得一些安全性。 rename-command CONFIG 5ec4db169f9d4dddacbfb0c26ea7e5ef rename-command FLUSHALL 1041285018a942a4922cbf76623b741e 或者,你可以将新名字设置为一个空字符串,从而禁用掉一个命令。 ### 大小限制(Size Limitations) 当你开始使用Redis,你可能会想知道,我能使用多少个关键字?还可能想知道,一个散列数据结构能有多少个域(尤其是当你用它来组织数据时),或者是,一个列表数据结构或集合数据结构能有多少个元素?对于每一个实例,实际限制都能达到亿万级别(hundreds of millions)。 ### 复制(Replication) Redis支持复制功能,这意味着当你向一个Redis实例(Master)进行写入时,一个或多个其他实例(Slaves)能通过Master实例来保持更新。可以在配置文件里设置`slaveof`,或使用`slaveof`命令来配置一个Slave实例。对于那些没有进行这些设置的Redis实例,就可能一个Master实例。 为了更好保护你的数据,复制功能拷贝数据到不同的服务器。复制功能还能用于改善性能,因为读取请求可以被发送到Slave实例。他们可能会返回一些稍微滞后的数据,但对于大多数程序来说,这是一个值得做的折衷。 遗憾的是,Redis的复制功能还没有提供自动故障恢复。如果Master实例崩溃了,一个Slave实例需要手动的进行升级。如果你想使用Redis去达到某种高可用性,对于使用心跳监控(heartbeat monitoring)和脚本自动开关(scripts to automate the switch)的传统高可用性工具来说,现在还是一个棘手的难题。 ### 备份文件(Backups) 备份Redis非常简单,你可以将Redis的快照(snapshot)拷贝到任何地方,包括S3、FTP等。默认情况下,Redis会把快照存储为一个名为`dump.rdb`的文件。在任何时候,你都可以对这个文件执行`scp`、`ftp`或`cp`等常用命令。 有一种常见情况,在Master实例上会停用快照以及单一附加文件(aof),然后让一个Slave实例去处理备份事宜。这可以帮助减少Master实例的载荷。在不损害整体系统响应性的情况下,你还可以在Slave实例上设置更多主动存储的参数。 ### 缩放和Redis集群(Scaling and Redis Cluster) 复制功能(Replication)是一个成长中的网站可以利用的第一个工具。有一些命令会比另外一些来的昂贵(例如`sort`命令),将这些运行载荷转移到一个Slave实例里,可以保持整体系统对于查询的快速响应。 此外,通过分发你的关键字到多个Redis实例里,可以达到真正的缩放Redis(记住,Redis是单线程的,这些可以运行在同一个逻辑框里)。随着时间的推移,你将需要特别注意这些事情(尽管许多的Redis载体都提供了consistent-hashing算法)。对于数据水平分布(horizontal distribution)的考虑不在这本书所讨论的范围内。这些东西你也很可能不需要去担心,但是,无论你使用哪一种解决方案,有一些事情你还是必须意识到。 好消息是,这些工作都可在Redis集群下进行。不仅提供水平缩放(包括均衡),为了高可用性,还提供了自动故障恢复。 高可用性和缩放是可以达到的,只要你愿意为此付出时间和精力,Redis集群也使事情变得简单多了。 ### 小结 在过去的一段时间里,已经有许多的计划和网站使用了Redis,毫无疑问,Redis已经可以应用于实际生产中了。然而,一些工具还是不够成熟,尤其是一些安全性和可用性相关的工具。对于Redis集群,我们希望很快就能看到其实现,这应该能为一些现有的管理挑战提供处理帮忙。 \clearpage ## 总结 在许多方面,Redis体现了一种简易的数据处理方式,其剥离掉了大部分的复杂性和抽象,并可有效的在不同系统里运行。不少情况下,选择Redis不是最佳的选择。在另一些情况里,Redis就像是为你的数据提供了特别定制的解决方案。 最终,回到我最开始所说的:Redis很容易学习。现在有许多的新技术,很难弄清楚哪些才真正值得我们花时间去学习。如果你从实际好处来考虑,Redis提供了他的简单性。我坚信,对于你和你的团队,学习Redis是最好的技术投资之一。
hugolib/redis.cn.md
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.0010868769604712725, 0.00032905678381212056, 0.00016423290071543306, 0.000205534728593193, 0.00022793695097789168 ]
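Among other things, the Redis notes above describe using a hash as a secondary index (a users:lookup:email hash mapping an email to an id) so the same object can be fetched either by id or by email without storing it twice. Below is a small sketch of that pattern, assuming the github.com/redis/go-redis/v9 client and a Redis server on localhost; the key names follow the text, while the email address and payload are placeholders.

package main

import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Store the user object once, under its id...
	if err := rdb.Set(ctx, "users:9001", `{"id": 9001, "email": "[email protected]"}`, 0).Err(); err != nil {
		panic(err)
	}
	// ...and keep a hash as the secondary index: email -> id.
	if err := rdb.HSet(ctx, "users:lookup:email", "[email protected]", 9001).Err(); err != nil {
		panic(err)
	}

	// Look up by email: one HGET for the id, then one GET for the object.
	id, err := rdb.HGet(ctx, "users:lookup:email", "[email protected]").Result()
	if err != nil {
		panic(err)
	}
	user, err := rdb.Get(ctx, "users:"+id).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(user)
}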
{ "id": 2, "code_window": [ "\n", "\tif err := s.RenderPages(); err != nil {\n", "\t\tt.Fatalf(\"Unable to render pages. %s\", err)\n", "\t}\n", "\n", "\tcontent, ok := target.files[\"content/blue/slug-doc-1.html\"]\n", "\tif !ok {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\ttests := []struct {\n", "\t\tfile, expected string\n", "\t}{\n", "\t\t{\"content/blue/doc2.html\", \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"},\n", "\t\t{\"sect/doc1.html\", \"<!DOCTYPE html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\"},\n", "\t}\n", "\n", "\tfor _, test := range tests {\n", "\tcontent, ok := target.files[test.file]\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 271 }
package transform

import (
	htmltran "code.google.com/p/go-html-transform/html/transform"
	"io"
	"net/url"
)

type Transformer struct {
	BaseURL string
}

func (t *Transformer) Apply(r io.Reader, w io.Writer) (err error) {
	var tr *htmltran.Transformer

	if tr, err = htmltran.NewFromReader(r); err != nil {
		return
	}

	if err = t.absUrlify(tr, elattr{"a", "href"}, elattr{"script", "src"}); err != nil {
		return
	}

	return tr.Render(w)
}

type elattr struct {
	tag, attr string
}

func (t *Transformer) absUrlify(tr *htmltran.Transformer, selectors ...elattr) (err error) {
	var baseURL, inURL *url.URL

	if baseURL, err = url.Parse(t.BaseURL); err != nil {
		return
	}

	replace := func(in string) string {
		if inURL, err = url.Parse(in); err != nil {
			return in + "?"
		}
		return baseURL.ResolveReference(inURL).String()
	}

	for _, el := range selectors {
		if err = tr.Apply(htmltran.TransformAttrib(el.attr, replace), el.tag); err != nil {
			return
		}
	}

	return
}
transform/post.go
1
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.0003067690704483539, 0.0001946439006133005, 0.00016566619160585105, 0.00017337873578071594, 0.00005030831744079478 ]
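The transform/post.go file above exposes a single entry point: Transformer.Apply reads HTML from an io.Reader, rewrites the href and src attributes of a and script elements against BaseURL, and writes the result to an io.Writer. A usage sketch follows; the base URL and input HTML are made up, and the import path is the one used elsewhere in this dump.

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/hugo/transform"
)

func main() {
	t := &transform.Transformer{BaseURL: "http://base.url/"}

	in := bytes.NewBufferString(`<html><body><a href="/foo/bar.html">link</a></body></html>`)
	out := new(bytes.Buffer)

	if err := t.Apply(in, out); err != nil {
		fmt.Println("transform failed:", err)
		return
	}
	// Relative hrefs are now resolved against http://base.url/
	fmt.Println(out.String())
}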
{ "id": 2, "code_window": [ "\n", "\tif err := s.RenderPages(); err != nil {\n", "\t\tt.Fatalf(\"Unable to render pages. %s\", err)\n", "\t}\n", "\n", "\tcontent, ok := target.files[\"content/blue/slug-doc-1.html\"]\n", "\tif !ok {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\ttests := []struct {\n", "\t\tfile, expected string\n", "\t}{\n", "\t\t{\"content/blue/doc2.html\", \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"},\n", "\t\t{\"sect/doc1.html\", \"<!DOCTYPE html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\"},\n", "\t}\n", "\n", "\tfor _, test := range tests {\n", "\tcontent, ok := target.files[test.file]\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 271 }
--- title: "Variables" date: "2013-07-01" aliases: ["/doc/variables/"] --- Hugo makes a set of values available to the templates. Go templates are context based. The following are available in the context for the templates. ## Page Variables **.Title** The title for the content.<br> **.Description** The description for the content.<br> **.Keywords** The meta keywords for this content.<br> **.Date** The date the content is published on.<br> **.Indexes** These will use the field name of the plural form of the index (see tags and categories above)<br> **.Permalink** The Permanent link for this page.<br> **.FuzzyWordCount** The approximate number of words in the content.<br> **.RSSLink** Link to the indexes' rss link <br> **.Prev** Pointer to the previous content (based on pub date)<br> **.Next** Pointer to the following content (based on pub date)<br> **.Site** See site variables below<br> **.Content** The content itself, defined below the front matter.<br> **.Summary** A generated summary of the content for easily showing a snippet in a summary view.<br> Any value defined in the front matter, including indexes will be made available under `.Params`. Take for example I'm using tags and categories as my indexes. The following would be how I would access them: **.Params.Tags** <br> **.Params.Categories** <br> ## Node Variables In Hugo a node is any page not rendered directly by a content file. This includes indexes, lists and the homepage. **.Title** The title for the content.<br> **.Date** The date the content is published on.<br> **.Data** The data specific to this type of node.<br> **.Permalink** The Permanent link for this node<br> **.Url** The relative url for this node.<br> **.RSSLink** Link to the indexes' rss link <br> **.Site** See site variables below<br> ## Site Variables Also available is `.Site` which has the following: **.Site.BaseUrl** The base URL for the site as defined in the config.json file.<br> **.Site.Indexes** The names of the indexes of the site.<br> **.Site.LastChange** The date of the last change of the most recent content.<br> **.Site.Recent** Array of all content ordered by Date, newest first<br>
docs/content/layout/variables.md
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.00016977555060293525, 0.00016641760885249823, 0.00016323683666996658, 0.00016617056098766625, 0.0000026107290977961384 ]
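The variables listed above are resolved against whatever context a template is executed with (a page, a node, or the site). The stdlib-only sketch below mimics that lookup with made-up context types whose field names mirror the list; these are not Hugo's real Page or SiteInfo types.

package main

import (
	"html/template"
	"os"
)

// Site and Page are illustrative stand-ins for the contexts described above.
type Site struct {
	BaseUrl template.URL
	Title   string
}

type Page struct {
	Title     string
	Permalink string
	Site      Site
}

func main() {
	// ".Title" and ".Site.BaseUrl" resolve against whatever value the
	// template is executed with - here, a Page.
	tmpl := template.Must(template.New("single").Parse(
		`<h1>{{ .Title }}</h1><a href="{{ .Site.BaseUrl }}">{{ .Site.Title }}</a> - {{ .Permalink }}`))

	p := Page{
		Title:     "spf13-vim 3.0 release",
		Permalink: "http://example.com/posts/spf13-vim-3-0-release/",
		Site:      Site{BaseUrl: "http://example.com/", Title: "Example Site"},
	}
	if err := tmpl.Execute(os.Stdout, p); err != nil {
		panic(err)
	}
}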
{ "id": 2, "code_window": [ "\n", "\tif err := s.RenderPages(); err != nil {\n", "\t\tt.Fatalf(\"Unable to render pages. %s\", err)\n", "\t}\n", "\n", "\tcontent, ok := target.files[\"content/blue/slug-doc-1.html\"]\n", "\tif !ok {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\ttests := []struct {\n", "\t\tfile, expected string\n", "\t}{\n", "\t\t{\"content/blue/doc2.html\", \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"},\n", "\t\t{\"sect/doc1.html\", \"<!DOCTYPE html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\"},\n", "\t}\n", "\n", "\tfor _, test := range tests {\n", "\tcontent, ok := target.files[test.file]\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 271 }
--- title: "Using Hugo" date: "2013-07-01" aliases: ["/doc/usage/"] --- Make sure either hugo is in your path or provide a path to it. $ hugo --help usage: hugo [flags] [] -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ -D, --build-drafts=false: include content marked as draft --config="": config file (default is path/config.yaml|json|toml) -d, --destination="": filesystem path to write files to -h, --help=false: show this help --port="1313": port to run web server on, default :1313 -S, --server=false: run a (very) simple web server -s, --source="": filesystem path to read files relative from --uglyurls=false: if true, use /filename.html instead of /filename/ -v, --verbose=false: verbose output --version=false: which version of hugo -w, --watch=false: watch filesystem for changes and recreate as needed ## Common Usage Example: The most common use is probably to run hugo with your current directory being the input directory. $ hugo > X pages created > Y indexes created in 8 ms If you are working on things and want to see the changes immediately, tell Hugo to watch for changes. **It will recreate the site faster than you can tab over to your browser to view the changes.** $ hugo -s ~/mysite --watch Watching for changes. Press ctrl+c to stop 15 pages created 0 tags created in 8 ms Hugo can even run a server and create your site at the same time! $hugo --server -ws ~/mysite Watching for changes. Press ctrl+c to stop 15 pages created 0 tags created in 8 ms Web Server is available at http://localhost:1313 Press ctrl+c to stop
docs/content/overview/usage.md
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.00017396861221641302, 0.0001693293306743726, 0.00016629086167085916, 0.0001690255885478109, 0.0000025794204248086317 ]
{ "id": 2, "code_window": [ "\n", "\tif err := s.RenderPages(); err != nil {\n", "\t\tt.Fatalf(\"Unable to render pages. %s\", err)\n", "\t}\n", "\n", "\tcontent, ok := target.files[\"content/blue/slug-doc-1.html\"]\n", "\tif !ok {\n" ], "labels": [ "keep", "keep", "keep", "keep", "keep", "replace", "keep" ], "after_edit": [ "\ttests := []struct {\n", "\t\tfile, expected string\n", "\t}{\n", "\t\t{\"content/blue/doc2.html\", \"<html><head></head><body><a href=\\\"http://auth/bub/foobar.jpg\\\">Going</a></body></html>\"},\n", "\t\t{\"sect/doc1.html\", \"<!DOCTYPE html><html><head></head><body><a href=\\\"#frag1\\\">link</a></body></html>\"},\n", "\t}\n", "\n", "\tfor _, test := range tests {\n", "\tcontent, ok := target.files[test.file]\n" ], "file_path": "hugolib/site_test.go", "type": "replace", "edit_start_line_idx": 271 }
package hugolib

import (
	"path"
	"strings"
	"testing"
)

var SIMPLE_PAGE_YAML = `---
contenttype: ""
---
Sample Text
`

func TestDegenerateMissingFolderInPageFilename(t *testing.T) {
	p, err := ReadFrom(strings.NewReader(SIMPLE_PAGE_YAML), path.Join("foobar"))
	if err != nil {
		t.Fatalf("Error in ReadFrom")
	}
	if p.Section != "" {
		t.Fatalf("No section should be set for a file path: foobar")
	}
}

func TestNewPageWithFilePath(t *testing.T) {
	toCheck := []struct {
		input   string
		section string
		layout  string
	}{
		{path.Join("sub", "foobar.html"), "sub", "sub/single.html"},
		{path.Join("content", "sub", "foobar.html"), "sub", "sub/single.html"},
		{path.Join("content", "dub", "sub", "foobar.html"), "sub", "sub/single.html"},
	}

	for _, el := range toCheck {
		p, err := ReadFrom(strings.NewReader(SIMPLE_PAGE_YAML), el.input)
		p.guessSection()
		if err != nil {
			t.Fatalf("Reading from SIMPLE_PAGE_YAML resulted in an error: %s", err)
		}
		if p.Section != el.section {
			t.Fatalf("Section not set to %s for page %s. Got: %s", el.section, el.input, p.Section)
		}
		if p.Layout() != el.layout {
			t.Fatalf("Layout incorrect. Expected: '%s', Got: '%s'", el.layout, p.Layout())
		}
	}
}
hugolib/path_seperators_test.go
0
https://github.com/gohugoio/hugo/commit/784077da4dcc3476f61bbf99c5f873b71694dd64
[ 0.001742731430567801, 0.00046728868619538844, 0.0001671797363087535, 0.00018754100892692804, 0.0005737021565437317 ]