hunk (dict) | file (string, 0–11.8M chars) | file_path (string, 2–234 chars) | label (int64, 0–1) | commit_url (string, 74–103 chars) | dependency_score (sequence of 5 floats) |
---|---|---|---|---|---|
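The `hunk` field packs a code-edit window: `code_window` holds the original lines, `labels` marks each line as `keep`, `add`, or `replace`, and `after_edit` carries the new lines for the single edited run. Below is a minimal sketch of reassembling the post-edit window under those label semantics — inferred from the rows in this dump, not documented anywhere in it — and the `apply_hunk` helper name is illustrative, not part of the dataset:

```python
def apply_hunk(hunk: dict) -> str:
    """Rebuild the post-edit code window from one dataset row's hunk.

    Assumed semantics (inferred from the rows below):
      - "keep":    the line is unchanged.
      - "add":     the line is kept, and the after_edit lines are spliced
                   in immediately after it.
      - "replace": the line is dropped; the after_edit lines are emitted
                   once in place of the whole run of "replace" labels.
    """
    out = []
    spliced = False  # each hunk carries exactly one contiguous edit
    for line, label in zip(hunk["code_window"], hunk["labels"]):
        if label == "keep":
            out.append(line)
        elif label == "add":
            out.append(line)  # keep the anchor line...
            if not spliced:
                out.extend(hunk["after_edit"])  # ...then insert the new lines
                spliced = True
        elif label == "replace":
            if not spliced:
                out.extend(hunk["after_edit"])  # substitute once for the run
                spliced = True
    return "".join(out)
```

Applied to the `id: 9` hunk in the first row, this yields `runServ` opening with the `execDir`/`newLogger` lines before `log.Trace`, consistent with the linked gogs commit; applied to the `id: 0` hunk, it swaps the two `replace`-labeled `GetActionImpl` lines for the `CreateActionImpl` pair.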
{
"id": 9,
"code_window": [
"\t_, e := sl[b]\n",
"\treturn e\n",
"}\n",
"\n",
"func runServ(k *cli.Context) {\n",
"\tlog.Trace(\"new serv request \" + log.Mode + \":\" + log.Config)\n",
"\n",
"\tbase.NewConfigContext()\n",
"\tmodels.LoadModelsConfig()\n",
"\tmodels.NewEngine()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texecDir, _ := base.ExecDir()\n",
"\tnewLogger(execDir)\n"
],
"file_path": "serve.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"container/list"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/codegangsta/cli"
"github.com/gogits/gogs/modules/log"
"github.com/gogits/git"
"github.com/gogits/gogs/models"
"github.com/gogits/gogs/modules/base"
)
var (
COMMANDS_READONLY = map[string]int{
"git-upload-pack": models.AU_WRITABLE,
"git upload-pack": models.AU_WRITABLE,
"git-upload-archive": models.AU_WRITABLE,
}
COMMANDS_WRITE = map[string]int{
"git-receive-pack": models.AU_READABLE,
"git receive-pack": models.AU_READABLE,
}
)
var CmdServ = cli.Command{
Name: "serv",
Usage: "This command just should be called by ssh shell",
Description: `
gogs serv provide access auth for repositories`,
Action: runServ,
Flags: []cli.Flag{},
}
func init() {
level := "0"
os.MkdirAll("log", os.ModePerm)
log.NewLogger(10000, "file", fmt.Sprintf(`{"level":%s,"filename":"%s"}`, level, "log/serv.log"))
log.Trace("start logging...")
}
func parseCmd(cmd string) (string, string) {
ss := strings.SplitN(cmd, " ", 2)
if len(ss) != 2 {
return "", ""
}
verb, args := ss[0], ss[1]
if verb == "git" {
ss = strings.SplitN(args, " ", 2)
args = ss[1]
verb = fmt.Sprintf("%s %s", verb, ss[0])
}
return verb, args
}
func In(b string, sl map[string]int) bool {
_, e := sl[b]
return e
}
func runServ(k *cli.Context) {
log.Trace("new serv request " + log.Mode + ":" + log.Config)
base.NewConfigContext()
models.LoadModelsConfig()
models.NewEngine()
keys := strings.Split(os.Args[2], "-")
if len(keys) != 2 {
fmt.Println("auth file format error")
log.Error("auth file format error")
return
}
keyId, err := strconv.ParseInt(keys[1], 10, 64)
if err != nil {
fmt.Println("auth file format error")
log.Error("auth file format error")
return
}
user, err := models.GetUserByKeyId(keyId)
if err != nil {
fmt.Println("You have no right to access")
log.Error("You have no right to access")
return
}
cmd := os.Getenv("SSH_ORIGINAL_COMMAND")
if cmd == "" {
println("Hi", user.Name, "! You've successfully authenticated, but Gogs does not provide shell access.")
return
}
verb, args := parseCmd(cmd)
rRepo := strings.Trim(args, "'")
rr := strings.SplitN(rRepo, "/", 2)
if len(rr) != 2 {
println("Unavilable repository", args)
log.Error("Unavilable repository %v", args)
return
}
repoName := rr[1]
if strings.HasSuffix(repoName, ".git") {
repoName = repoName[:len(repoName)-4]
}
isWrite := In(verb, COMMANDS_WRITE)
isRead := In(verb, COMMANDS_READONLY)
repo, err := models.GetRepositoryByName(user.Id, repoName)
var isExist bool = true
if err != nil {
if err == models.ErrRepoNotExist {
isExist = false
if isRead {
println("Repository", user.Name+"/"+repoName, "is not exist")
log.Error("Repository " + user.Name + "/" + repoName + " is not exist")
return
}
} else {
println("Get repository error:", err)
log.Error("Get repository error: " + err.Error())
return
}
}
// access check
switch {
case isWrite:
has, err := models.HasAccess(user.Name, repoName, models.AU_WRITABLE)
if err != nil {
println("Inernel error:", err)
log.Error(err.Error())
return
}
if !has {
println("You have no right to write this repository")
log.Error("You have no right to access this repository")
return
}
case isRead:
has, err := models.HasAccess(user.Name, repoName, models.AU_READABLE)
if err != nil {
println("Inernel error")
log.Error(err.Error())
return
}
if !has {
has, err = models.HasAccess(user.Name, repoName, models.AU_WRITABLE)
if err != nil {
println("Inernel error")
log.Error(err.Error())
return
}
}
if !has {
println("You have no right to access this repository")
log.Error("You have no right to access this repository")
return
}
default:
println("Unknown command")
log.Error("Unknown command")
return
}
var rep *git.Repository
repoPath := models.RepoPath(user.Name, repoName)
if !isExist {
if isWrite {
_, err = models.CreateRepository(user, repoName, "", "", "", false, true)
if err != nil {
println("Create repository failed")
log.Error("Create repository failed: " + err.Error())
return
}
}
}
rep, err = git.OpenRepository(repoPath)
if err != nil {
println("OpenRepository failed:", err.Error())
log.Error("OpenRepository failed: " + err.Error())
return
}
refs, err := rep.AllReferencesMap()
if err != nil {
println("Get All References failed:", err.Error())
log.Error("Get All References failed: " + err.Error())
return
}
gitcmd := exec.Command(verb, rRepo)
gitcmd.Dir = base.RepoRootPath
var s string
b := bytes.NewBufferString(s)
gitcmd.Stdout = io.MultiWriter(os.Stdout, b)
//gitcmd.Stdin = io.MultiReader(os.Stdin, b)
gitcmd.Stdin = os.Stdin
gitcmd.Stderr = os.Stderr
if err = gitcmd.Run(); err != nil {
println("execute command error:", err.Error())
log.Error("execute command error: " + err.Error())
return
}
if isRead {
return
}
time.Sleep(time.Second)
// find push reference name
var t = "ok refs/heads/"
var i int
var refname string
for {
l, err := b.ReadString('\n')
if err != nil {
break
}
i = i + 1
l = l[:len(l)-1]
idx := strings.Index(l, t)
if idx > 0 {
refname = l[idx+len(t):]
}
}
if refname == "" {
println("No find any reference name:", b.String())
log.Error("No find any reference name: " + b.String())
return
}
var ref *git.Reference
var ok bool
var l *list.List
//log.Info("----", refname, "-----")
if ref, ok = refs[refname]; !ok {
// for new branch
refs, err = rep.AllReferencesMap()
if err != nil {
println("Get All References failed:", err.Error())
log.Error("Get All References failed: " + err.Error())
return
}
if ref, ok = refs[refname]; !ok {
log.Error("unknow reference name -", refname, "-", b.String())
log.Error("unknow reference name -", refname, "-", b.String())
return
}
l, err = ref.AllCommits()
if err != nil {
println("Get All Commits failed:", err.Error())
log.Error("Get All Commits failed: " + err.Error())
return
}
} else {
//log.Info("----", ref, "-----")
var last *git.Commit
//log.Info("00000", ref.Oid.String())
last, err = ref.LastCommit()
if err != nil {
println("Get last commit failed:", err.Error())
log.Error("Get last commit failed: " + err.Error())
return
}
ref2, err := rep.LookupReference(ref.Name)
if err != nil {
println("look up reference failed:", err.Error())
log.Error("look up reference failed: " + err.Error())
return
}
//log.Info("11111", ref2.Oid.String())
before, err := ref2.LastCommit()
if err != nil {
println("Get last commit failed:", err.Error())
log.Error("Get last commit failed: " + err.Error())
return
}
//log.Info("----", before.Id(), "-----", last.Id())
l = ref.CommitsBetween(before, last)
}
commits := make([][]string, 0)
var maxCommits = 3
for e := l.Front(); e != nil; e = e.Next() {
commit := e.Value.(*git.Commit)
commits = append(commits, []string{commit.Id().String(), commit.Message()})
if len(commits) >= maxCommits {
break
}
}
if err = models.CommitRepoAction(user.Id, user.Name,
repo.Id, repoName, refname, &base.PushCommits{l.Len(), commits}); err != nil {
log.Error("runUpdate.models.CommitRepoAction: %v", err, commits)
} else {
c := exec.Command("git", "update-server-info")
c.Dir = repoPath
err := c.Run()
if err != nil {
log.Error("update-server-info: %v", err)
}
}
}
| serve.go | 1 | https://github.com/gogs/gogs/commit/197c4d4a5ba8a9540c49879324194f5f6be4689c | [
0.9990878105163574,
0.06123228371143341,
0.00016605319979134947,
0.0001719760912237689,
0.23809508979320526
] |
{
"id": 9,
"code_window": [
"\t_, e := sl[b]\n",
"\treturn e\n",
"}\n",
"\n",
"func runServ(k *cli.Context) {\n",
"\tlog.Trace(\"new serv request \" + log.Mode + \":\" + log.Config)\n",
"\n",
"\tbase.NewConfigContext()\n",
"\tmodels.LoadModelsConfig()\n",
"\tmodels.NewEngine()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texecDir, _ := base.ExecDir()\n",
"\tnewLogger(execDir)\n"
],
"file_path": "serve.go",
"type": "add",
"edit_start_line_idx": 75
} | Copyright (c) 2014
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | LICENSE | 0 | https://github.com/gogs/gogs/commit/197c4d4a5ba8a9540c49879324194f5f6be4689c | [
0.0001731228403514251,
0.00017002892855089158,
0.00016530012362636626,
0.0001716638362267986,
0.0000033964104204642354
] |
{
"id": 9,
"code_window": [
"\t_, e := sl[b]\n",
"\treturn e\n",
"}\n",
"\n",
"func runServ(k *cli.Context) {\n",
"\tlog.Trace(\"new serv request \" + log.Mode + \":\" + log.Config)\n",
"\n",
"\tbase.NewConfigContext()\n",
"\tmodels.LoadModelsConfig()\n",
"\tmodels.NewEngine()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texecDir, _ := base.ExecDir()\n",
"\tnewLogger(execDir)\n"
],
"file_path": "serve.go",
"type": "add",
"edit_start_line_idx": 75
} | {{template "base/head" .}}
{{template "base/navbar" .}}
<div id="body" class="container" data-page="user">
{{template "user/setting_nav" .}}
<div id="user-setting-container" class="col-md-9">
<h4>Security</h4>
</div>
</div>
{{template "base/footer" .}} | templates/user/security.tmpl | 0 | https://github.com/gogs/gogs/commit/197c4d4a5ba8a9540c49879324194f5f6be4689c | [
0.00017258900334127247,
0.00017258900334127247,
0.00017258900334127247,
0.00017258900334127247,
0
] |
{
"id": 9,
"code_window": [
"\t_, e := sl[b]\n",
"\treturn e\n",
"}\n",
"\n",
"func runServ(k *cli.Context) {\n",
"\tlog.Trace(\"new serv request \" + log.Mode + \":\" + log.Config)\n",
"\n",
"\tbase.NewConfigContext()\n",
"\tmodels.LoadModelsConfig()\n",
"\tmodels.NewEngine()\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\texecDir, _ := base.ExecDir()\n",
"\tnewLogger(execDir)\n"
],
"file_path": "serve.go",
"type": "add",
"edit_start_line_idx": 75
} | // Copyright 2014 The Gogs Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package models
import (
"strings"
"time"
)
// Access types.
const (
AU_READABLE = iota + 1
AU_WRITABLE
)
// Access represents the accessibility of user and repository.
type Access struct {
Id int64
UserName string `xorm:"unique(s)"`
RepoName string `xorm:"unique(s)"`
Mode int `xorm:"unique(s)"`
Created time.Time `xorm:"created"`
}
// AddAccess adds new access record.
func AddAccess(access *Access) error {
_, err := orm.Insert(access)
return err
}
// HasAccess returns true if someone can read or write given repository.
func HasAccess(userName, repoName string, mode int) (bool, error) {
return orm.Get(&Access{
Id: 0,
UserName: strings.ToLower(userName),
RepoName: strings.ToLower(repoName),
Mode: mode,
})
}
| models/access.go | 0 | https://github.com/gogs/gogs/commit/197c4d4a5ba8a9540c49879324194f5f6be4689c | [
0.00017617843695916235,
0.00017301517073065042,
0.00016675131337251514,
0.00017562496941536665,
0.000003820025540335337
] |
{
"id": 0,
"code_window": [
"\tcore \"k8s.io/client-go/testing\"\n",
"\tpolicy \"k8s.io/kubernetes/pkg/apis/policy\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
core "k8s.io/client-go/testing"
)
func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {
action := core.GetActionImpl{}
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
action.Name = eviction.Name
_, err := c.Fake.Invokes(action, eviction)
return err
}
| staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go | 1 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.9981998205184937,
0.4980311393737793,
0.0001671124919084832,
0.49687883257865906,
0.4978618621826172
] |
{
"id": 0,
"code_window": [
"\tcore \"k8s.io/client-go/testing\"\n",
"\tpolicy \"k8s.io/kubernetes/pkg/apis/policy\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
const (
KubernetesPodNameLabel = "io.kubernetes.pod.name"
KubernetesPodNamespaceLabel = "io.kubernetes.pod.namespace"
KubernetesPodUIDLabel = "io.kubernetes.pod.uid"
KubernetesContainerNameLabel = "io.kubernetes.container.name"
KubernetesContainerTypeLabel = "io.kubernetes.container.type"
)
func GetContainerName(labels map[string]string) string {
return labels[KubernetesContainerNameLabel]
}
func GetPodName(labels map[string]string) string {
return labels[KubernetesPodNameLabel]
}
func GetPodUID(labels map[string]string) string {
return labels[KubernetesPodUIDLabel]
}
func GetPodNamespace(labels map[string]string) string {
return labels[KubernetesPodNamespaceLabel]
}
| pkg/kubelet/types/labels.go | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.005938957445323467,
0.0013841046020388603,
0.00016387154755648226,
0.00017686837236396968,
0.0022803449537605047
] |
{
"id": 0,
"code_window": [
"\tcore \"k8s.io/client-go/testing\"\n",
"\tpolicy \"k8s.io/kubernetes/pkg/apis/policy\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduling
import (
"fmt"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
_ "github.com/stretchr/testify/assert"
)
const (
defaultTimeout = 3 * time.Minute
)
var _ = framework.KubeDescribe("EquivalenceCache [Serial]", func() {
var cs clientset.Interface
var nodeList *v1.NodeList
var masterNodes sets.String
var systemPodsNo int
var ns string
f := framework.NewDefaultFramework("equivalence-cache")
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
framework.ExpectNoError(framework.CheckTestingNSDeletedExcept(cs, ns))
// Every test case in this suite assumes that cluster add-on pods stay stable and
// cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
systemPods, err := framework.GetPodsInNamespace(cs, ns, map[string]string{})
Expect(err).NotTo(HaveOccurred())
systemPodsNo = 0
for _, pod := range systemPods {
if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil {
systemPodsNo++
}
}
err = framework.WaitForPodsRunningReady(cs, api.NamespaceSystem, int32(systemPodsNo), int32(systemPodsNo), framework.PodReadyBeforeTimeout, map[string]string{})
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {
framework.Logf("\nLogging pods the kubelet thinks is on node %v before test", node.Name)
framework.PrintAllKubeletPods(cs, node.Name)
}
})
// This test verifies that GeneralPredicates works as expected:
// When a replica pod (with HostPorts) is scheduled to a node, it will invalidate GeneralPredicates cache on this node,
// so that subsequent replica pods with same host port claim will be rejected.
// We enforce all replica pods bind to the same node so there will always be conflicts.
It("validates GeneralPredicates is properly invalidated when a pod is scheduled [Slow]", func() {
By("Launching a RC with two replica pods with HostPorts")
nodeName := getNodeThatCanRunPodWithoutToleration(f)
rcName := "host-port"
// bind all replicas to same node
nodeSelector := map[string]string{"kubernetes.io/hostname": nodeName}
By("One pod should be scheduled, the other should be rejected")
// CreateNodeSelectorPods creates RC with host port 4312
WaitForSchedulerAfterAction(f, func() error {
err := CreateNodeSelectorPods(f, rcName, 2, nodeSelector, false)
return err
}, ns, rcName, false)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, rcName)
// the first replica pod is scheduled, and the second pod will be rejected.
verifyResult(cs, 1, 1, ns)
})
// This test verifies that MatchInterPodAffinity works as expected.
// In equivalence cache, it does not handle inter pod affinity (anti-affinity) specially (unless node label changed),
// because current predicates algorithm will ensure newly scheduled pod does not break existing affinity in cluster.
It("validates pod affinity works properly when new replica pod is scheduled", func() {
// create a pod running with label {security: S1}, and choose this node
nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)
By("Trying to apply a random label on the found node.")
// we need to use real failure domains, since scheduler only know them
k := "failure-domain.beta.kubernetes.io/zone"
v := "equivalence-e2e-test"
oldValue := framework.AddOrUpdateLabelOnNodeAndReturnOldValue(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
// restore the node label
defer framework.AddOrUpdateLabelOnNode(cs, nodeName, k, oldValue)
By("Trying to schedule RC with Pod Affinity should success.")
framework.WaitForStableCluster(cs, masterNodes)
affinityRCName := "with-pod-affinity-" + string(uuid.NewUUID())
replica := 2
labelsMap := map[string]string{
"name": affinityRCName,
}
affinity := &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "security",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"S1"},
},
},
},
TopologyKey: k,
Namespaces: []string{ns},
},
},
},
}
rc := getRCWithInterPodAffinity(affinityRCName, labelsMap, replica, affinity, imageutils.GetPauseImageName())
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, affinityRCName)
// RC should be running successfully
// TODO: WaitForSchedulerAfterAction() can on be used to wait for failure event,
// not for successful RC, since no specific pod name can be provided.
_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForControlledPodsRunning(cs, ns, affinityRCName, api.Kind("ReplicationController")))
By("Remove node failure domain label")
framework.RemoveLabelOffNode(cs, nodeName, k)
By("Trying to schedule another equivalent Pod should fail due to node label has been removed.")
// use scale to create another equivalent pod and wait for failure event
WaitForSchedulerAfterAction(f, func() error {
err := framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, affinityRCName, uint(replica+1), false)
return err
}, ns, affinityRCName, false)
// and this new pod should be rejected since node label has been updated
verifyReplicasResult(cs, replica, 1, ns, affinityRCName)
})
// This test verifies that MatchInterPodAffinity (anti-affinity) is respected as expected.
It("validates pod anti-affinity works properly when new replica pod is scheduled", func() {
By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, "host-port")
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
framework.ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
By("Applying a random label to both nodes.")
k := "e2e.inter-pod-affinity.kubernetes.io/zone"
v := "equivalence-e2etest"
for _, nodeName := range nodeNames {
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
}
By("Trying to launch a pod with the service label on the selected nodes.")
// run a pod with label {"service": "S1"} and expect it to be running
runPausePod(f, pausePodConfig{
Name: "with-label-" + string(uuid.NewUUID()),
Labels: map[string]string{"service": "S1"},
NodeSelector: map[string]string{k: v}, // only launch on our two nodes
})
By("Trying to launch RC with podAntiAffinity on these two nodes should be rejected.")
labelRCName := "with-podantiaffinity-" + string(uuid.NewUUID())
replica := 2
labelsMap := map[string]string{
"name": labelRCName,
}
affinity := &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "service",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"S1"},
},
},
},
TopologyKey: k,
Namespaces: []string{ns},
},
},
},
}
rc := getRCWithInterPodAffinityNodeSelector(labelRCName, labelsMap, replica, affinity,
imageutils.GetPauseImageName(), map[string]string{k: v})
defer framework.DeleteRCAndWaitForGC(f.ClientSet, ns, labelRCName)
WaitForSchedulerAfterAction(f, func() error {
_, err := cs.CoreV1().ReplicationControllers(ns).Create(rc)
return err
}, ns, labelRCName, false)
// these two replicas should all be rejected since podAntiAffinity says it they anit-affinity with pod {"service": "S1"}
verifyReplicasResult(cs, 0, replica, ns, labelRCName)
})
})
// getRCWithInterPodAffinity returns RC with given affinity rules.
func getRCWithInterPodAffinity(name string, labelsMap map[string]string, replica int, affinity *v1.Affinity, image string) *v1.ReplicationController {
return getRCWithInterPodAffinityNodeSelector(name, labelsMap, replica, affinity, image, map[string]string{})
}
// getRCWithInterPodAffinity returns RC with given affinity rules and node selector.
func getRCWithInterPodAffinityNodeSelector(name string, labelsMap map[string]string, replica int, affinity *v1.Affinity, image string, nodeSelector map[string]string) *v1.ReplicationController {
replicaInt32 := int32(replica)
return &v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &replicaInt32,
Selector: labelsMap,
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labelsMap,
},
Spec: v1.PodSpec{
Affinity: affinity,
Containers: []v1.Container{
{
Name: name,
Image: image,
},
},
DNSPolicy: v1.DNSDefault,
NodeSelector: nodeSelector,
},
},
},
}
}
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) error {
By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: imageutils.GetPauseImageName(),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
NodeSelector: nodeSelector,
}
err := framework.RunRC(*config)
if expectRunning {
return err
}
return nil
}
| test/e2e/scheduling/equivalence_cache_predicates.go | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.009544831700623035,
0.0005071429186500609,
0.00016411783872172236,
0.00017520025721751153,
0.0016788396751508117
] |
{
"id": 0,
"code_window": [
"\tcore \"k8s.io/client-go/testing\"\n",
"\tpolicy \"k8s.io/kubernetes/pkg/apis/policy\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
"fmt"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
v1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
v1beta1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1beta1"
)
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=wardle.k8s.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("fischers"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Wardle().V1alpha1().Fischers().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("flunders"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Wardle().V1alpha1().Flunders().Informer()}, nil
// Group=wardle.k8s.io, Version=v1beta1
case v1beta1.SchemeGroupVersion.WithResource("flunders"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Wardle().V1beta1().Flunders().Informer()}, nil
}
return nil, fmt.Errorf("no informer found for %v", resource)
}
| staging/src/k8s.io/sample-apiserver/pkg/client/informers/externalversions/generic.go | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.0027767347637563944,
0.0005415870691649616,
0.0001639471884118393,
0.00016575468180235475,
0.0009125100914388895
] |
{
"id": 1,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "add",
"edit_start_line_idx": 30
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
core "k8s.io/client-go/testing"
)
func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {
action := core.GetActionImpl{}
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
action.Name = eviction.Name
_, err := c.Fake.Invokes(action, eviction)
return err
}
| staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go | 1 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.9958196878433228,
0.4893072843551636,
0.0001765862834872678,
0.48061642050743103,
0.4892844557762146
] |
{
"id": 1,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "add",
"edit_start_line_idx": 30
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"auth_options.go",
"doc.go",
"endpoint_search.go",
"errors.go",
"params.go",
"provider_client.go",
"results.go",
"service_client.go",
"util.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/gophercloud/gophercloud",
importpath = "github.com/gophercloud/gophercloud",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//vendor/github.com/gophercloud/gophercloud/openstack:all-srcs",
"//vendor/github.com/gophercloud/gophercloud/pagination:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/gophercloud/gophercloud/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.0001764834305504337,
0.00017535974620841444,
0.00017380113422404975,
0.00017557721002958715,
0.0000011568312174858875
] |
{
"id": 1,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "add",
"edit_start_line_idx": 30
} | /*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.autoscaling.v1;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
message CrossVersionObjectReference {
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
optional string kind = 1;
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
optional string name = 2;
// API version of the referent
// +optional
optional string apiVersion = 3;
}
// ExternalMetricSource indicates how to scale on a metric not associated with
// any Kubernetes object (for example length of queue in cloud
// messaging service, or QPS from loadbalancer running outside of cluster).
message ExternalMetricSource {
// metricName is the name of the metric in question.
optional string metricName = 1;
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
// targetValue is the target value of the metric (as a quantity).
// Mutually exclusive with TargetAverageValue.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
// targetAverageValue is the target per-pod value of global metric (as a quantity).
// Mutually exclusive with TargetValue.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
}
// ExternalMetricStatus indicates the current value of a global metric
// not associated with any Kubernetes object.
message ExternalMetricStatus {
// metricName is the name of a metric used for autoscaling in
// metric system.
optional string metricName = 1;
// metricSelector is used to identify a specific time series
// within a given metric.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
// currentValue is the current value of the metric (as a quantity)
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
// currentAverageValue is the current value of metric averaged over autoscaled pods.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
}
// configuration of a horizontal pod autoscaler.
message HorizontalPodAutoscaler {
// Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
optional HorizontalPodAutoscalerSpec spec = 2;
// current information about the autoscaler.
// +optional
optional HorizontalPodAutoscalerStatus status = 3;
}
// HorizontalPodAutoscalerCondition describes the state of
// a HorizontalPodAutoscaler at a certain point.
message HorizontalPodAutoscalerCondition {
// type describes the current condition
optional string type = 1;
// status is the status of the condition (True, False, Unknown)
optional string status = 2;
// lastTransitionTime is the last time the condition transitioned from
// one status to another
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
// reason is the reason for the condition's last transition.
// +optional
optional string reason = 4;
// message is a human-readable explanation containing details about
// the transition
// +optional
optional string message = 5;
}
// list of horizontal pod autoscaler objects.
message HorizontalPodAutoscalerList {
// Standard list metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// list of horizontal pod autoscaler objects.
repeated HorizontalPodAutoscaler items = 2;
}
// specification of a horizontal pod autoscaler.
message HorizontalPodAutoscalerSpec {
// reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
// and will set the desired number of pods by using its Scale subresource.
optional CrossVersionObjectReference scaleTargetRef = 1;
// lower limit for the number of pods that can be set by the autoscaler, default 1.
// +optional
optional int32 minReplicas = 2;
// upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
optional int32 maxReplicas = 3;
// target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
// if not specified the default autoscaling policy will be used.
// +optional
optional int32 targetCPUUtilizationPercentage = 4;
}
// current status of a horizontal pod autoscaler
message HorizontalPodAutoscalerStatus {
// most recent generation observed by this autoscaler.
// +optional
optional int64 observedGeneration = 1;
// last time the HorizontalPodAutoscaler scaled the number of pods;
// used by the autoscaler to control how often the number of pods is changed.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
// current number of replicas of pods managed by this autoscaler.
optional int32 currentReplicas = 3;
// desired number of replicas of pods managed by this autoscaler.
optional int32 desiredReplicas = 4;
// current average CPU utilization over all pods, represented as a percentage of requested CPU,
// e.g. 70 means that an average pod is using now 70% of its requested CPU.
// +optional
optional int32 currentCPUUtilizationPercentage = 5;
}
// MetricSpec specifies how to scale based on a single metric
// (only `type` and one other matching field should be set at once).
message MetricSpec {
// type is the type of metric source. It should be one of "Object",
// "Pods" or "Resource", each mapping to a matching field in the object.
optional string type = 1;
// object refers to a metric describing a single kubernetes object
// (for example, hits-per-second on an Ingress object).
// +optional
optional ObjectMetricSource object = 2;
// pods refers to a metric describing each pod in the current scale target
// (for example, transactions-processed-per-second). The values will be
// averaged together before being compared to the target value.
// +optional
optional PodsMetricSource pods = 3;
// resource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
optional ResourceMetricSource resource = 4;
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
optional ExternalMetricSource external = 5;
}
// MetricStatus describes the last-read state of a single metric.
message MetricStatus {
// type is the type of metric source. It will be one of "Object",
// "Pods" or "Resource", each corresponds to a matching field in the object.
optional string type = 1;
// object refers to a metric describing a single kubernetes object
// (for example, hits-per-second on an Ingress object).
// +optional
optional ObjectMetricStatus object = 2;
// pods refers to a metric describing each pod in the current scale target
// (for example, transactions-processed-per-second). The values will be
// averaged together before being compared to the target value.
// +optional
optional PodsMetricStatus pods = 3;
// resource refers to a resource metric (such as those specified in
// requests and limits) known to Kubernetes describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available
// to normal per-pod metrics using the "pods" source.
// +optional
optional ResourceMetricStatus resource = 4;
// external refers to a global metric that is not associated
// with any Kubernetes object. It allows autoscaling based on information
// coming from components running outside of cluster
// (for example length of queue in cloud messaging service, or
// QPS from loadbalancer running outside of cluster).
// +optional
optional ExternalMetricStatus external = 5;
}
// ObjectMetricSource indicates how to scale on a metric describing a
// kubernetes object (for example, hits-per-second on an Ingress object).
message ObjectMetricSource {
// target is the described Kubernetes object.
optional CrossVersionObjectReference target = 1;
// metricName is the name of the metric in question.
optional string metricName = 2;
// targetValue is the target value of the metric (as a quantity).
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
// selector is the string-encoded form of a standard kubernetes label selector for the given metric.
// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
// When unset, just the metricName will be used to gather metrics.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
// averageValue is the target value of the average of the
// metric across all relevant pods (as a quantity)
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
}
// ObjectMetricStatus indicates the current value of a metric describing a
// kubernetes object (for example, hits-per-second on an Ingress object).
message ObjectMetricStatus {
// target is the described Kubernetes object.
optional CrossVersionObjectReference target = 1;
// metricName is the name of the metric in question.
optional string metricName = 2;
// currentValue is the current value of the metric (as a quantity).
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
// selector is the string-encoded form of a standard kubernetes label selector for the given metric
// When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
// When unset, just the metricName will be used to gather metrics.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
// averageValue is the current value of the average of the
// metric across all relevant pods (as a quantity)
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
}
// PodsMetricSource indicates how to scale on a metric describing each pod in
// the current scale target (for example, transactions-processed-per-second).
// The values will be averaged together before being compared to the target
// value.
message PodsMetricSource {
// metricName is the name of the metric in question
optional string metricName = 1;
// targetAverageValue is the target value of the average of the
// metric across all relevant pods (as a quantity)
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
// selector is the string-encoded form of a standard kubernetes label selector for the given metric
// When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
// When unset, just the metricName will be used to gather metrics.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
}
// PodsMetricStatus indicates the current value of a metric describing each pod in
// the current scale target (for example, transactions-processed-per-second).
message PodsMetricStatus {
// metricName is the name of the metric in question
optional string metricName = 1;
// currentAverageValue is the current value of the average of the
// metric across all relevant pods (as a quantity)
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
// selector is the string-encoded form of a standard kubernetes label selector for the given metric
// When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
// When unset, just the metricName will be used to gather metrics.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
}
// ResourceMetricSource indicates how to scale on a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). The values will be averaged
// together before being compared to the target. Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source. Only one "target" type
// should be set.
message ResourceMetricSource {
// name is the name of the resource in question.
optional string name = 1;
// targetAverageUtilization is the target value of the average of the
// resource metric across all relevant pods, represented as a percentage of
// the requested value of the resource for the pods.
// +optional
optional int32 targetAverageUtilization = 2;
// targetAverageValue is the target value of the average of the
// resource metric across all relevant pods, as a raw value (instead of as
// a percentage of the request), similar to the "pods" metric source type.
// +optional
optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3;
}
// ResourceMetricStatus indicates the current value of a resource metric known to
// Kubernetes, as specified in requests and limits, describing each pod in the
// current scale target (e.g. CPU or memory). Such metrics are built in to
// Kubernetes, and have special scaling options on top of those available to
// normal per-pod metrics using the "pods" source.
message ResourceMetricStatus {
// name is the name of the resource in question.
optional string name = 1;
// currentAverageUtilization is the current value of the average of the
// resource metric across all relevant pods, represented as a percentage of
// the requested value of the resource for the pods. It will only be
// present if `targetAverageValue` was set in the corresponding metric
// specification.
// +optional
optional int32 currentAverageUtilization = 2;
// currentAverageValue is the current value of the average of the
// resource metric across all relevant pods, as a raw value (instead of as
// a percentage of the request), similar to the "pods" metric source type.
// It will always be set, regardless of the corresponding metric specification.
optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3;
}
// Scale represents a scaling request for a resource.
message Scale {
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// +optional
optional ScaleSpec spec = 2;
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
// +optional
optional ScaleStatus status = 3;
}
// ScaleSpec describes the attributes of a scale subresource.
message ScaleSpec {
// desired number of instances for the scaled object.
// +optional
optional int32 replicas = 1;
}
// ScaleStatus represents the current status of a scale subresource.
message ScaleStatus {
// actual number of observed instances of the scaled object.
optional int32 replicas = 1;
// label query over pods that should match the replicas count. This is same
// as the label selector but in the string format to avoid introspection
// by clients. The string will be in the same format as the query-param syntax.
// More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
optional string selector = 2;
}
| staging/src/k8s.io/api/autoscaling/v1/generated.proto | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00026103074196726084,
0.00017339357873424888,
0.00016285873425658792,
0.00017077295342460275,
0.000014959049622120801
] |
{
"id": 1,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go",
"type": "add",
"edit_start_line_idx": 30
} | This file is autogenerated, but we've stopped checking such files into the
repository to reduce the need for rebases. Please run hack/generate-docs.sh to
populate this file.
| docs/admin/kubeadm_init_phase_certs_ca.md | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00017400576325599104,
0.00017400576325599104,
0.00017400576325599104,
0.00017400576325599104,
0
] |
{
"id": 2,
"code_window": [
"func (c *FakePods) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n",
"\taction.Resource = podsResource\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Object = eviction\n",
"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Namespace = c.ns\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go",
"type": "add",
"edit_start_line_idx": 62
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
core "k8s.io/client-go/testing"
)
func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {
action := core.GetActionImpl{}
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
action.Name = eviction.Name
_, err := c.Fake.Invokes(action, eviction)
return err
}
| staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go | 1 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.9667252898216248,
0.2439410239458084,
0.00016343945753760636,
0.004437696188688278,
0.41731420159339905
] |
{
"id": 2,
"code_window": [
"func (c *FakePods) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n",
"\taction.Resource = podsResource\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Object = eviction\n",
"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Namespace = c.ns\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go",
"type": "add",
"edit_start_line_idx": 62
} | load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"config.go",
"initializer.go",
],
importpath = "k8s.io/kubernetes/pkg/kubeapiserver/admission",
visibility = ["//visibility:public"],
deps = [
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/quota/v1:go_default_library",
"//pkg/quota/v1/install:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/meta:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission/plugin/webhook/initializer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["initializer_test.go"],
embed = [":go_default_library"],
deps = ["//staging/src/k8s.io/apiserver/pkg/admission:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubeapiserver/admission/util:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| pkg/kubeapiserver/admission/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00017766143719200045,
0.0001762086758390069,
0.00017366530664730817,
0.00017671224486548454,
0.0000012891918004243053
] |
{
"id": 2,
"code_window": [
"func (c *FakePods) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n",
"\taction.Resource = podsResource\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Object = eviction\n",
"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Namespace = c.ns\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go",
"type": "add",
"edit_start_line_idx": 62
} | apiVersion: v1
kind: Service
metadata:
name: prune-svc
labels:
prune-group: "true"
spec:
selector:
prune-group-nomatch: "true"
ports:
- port: 80
protocol: TCP
| hack/testdata/prune/svc.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00017838506028056145,
0.00017728704551700503,
0.0001761890307534486,
0.00017728704551700503,
0.0000010980147635564208
] |
{
"id": 2,
"code_window": [
"func (c *FakePods) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n",
"\taction.Resource = podsResource\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Object = eviction\n",
"\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Namespace = c.ns\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/core/v1/fake/fake_pod_expansion.go",
"type": "add",
"edit_start_line_idx": 62
} | load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"date.go",
"time.go",
"timerfc1123.go",
"unixtime.go",
"utility.go",
],
importmap = "k8s.io/kubernetes/vendor/github.com/Azure/go-autorest/autorest/date",
importpath = "github.com/Azure/go-autorest/autorest/date",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
| vendor/github.com/Azure/go-autorest/autorest/date/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00017909334565047175,
0.00017789901176001877,
0.00017657819262240082,
0.00017802551155909896,
0.0000010306953299732413
] |
{
"id": 3,
"code_window": [
"\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n",
"\tcore \"k8s.io/client-go/testing\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
policy "k8s.io/api/policy/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
core "k8s.io/client-go/testing"
)
func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {
action := core.GetActionImpl{}
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
action.Subresource = "eviction"
action.Name = eviction.Name
_, err := c.Fake.Invokes(action, eviction)
return err
}
| staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go | 1 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.9983420372009277,
0.49784767627716064,
0.00016688668983988464,
0.4964408576488495,
0.4976794123649597
] |
{
"id": 3,
"code_window": [
"\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n",
"\tcore \"k8s.io/client-go/testing\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"conversion.go",
"defaults.go",
"doc.go",
"register.go",
"zz_generated.conversion.go",
"zz_generated.defaults.go",
],
importpath = "k8s.io/kubernetes/pkg/apis/apps/v1beta1",
deps = [
"//pkg/apis/apps:go_default_library",
"//pkg/apis/autoscaling:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/v1:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_test(
name = "go_default_test",
srcs = ["defaults_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/apps/install:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//staging/src/k8s.io/api/apps/v1beta1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
| pkg/apis/apps/v1beta1/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00042591479723341763,
0.00022819537844043225,
0.00017333030700683594,
0.00019377717399038374,
0.00008340015483554453
] |
{
"id": 3,
"code_window": [
"\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n",
"\tcore \"k8s.io/client-go/testing\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | package jmespath
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
"unicode/utf8"
)
type token struct {
tokenType tokType
value string
position int
length int
}
type tokType int
const eof = -1
// Lexer contains information about the expression being tokenized.
type Lexer struct {
expression string // The expression provided by the user.
currentPos int // The current position in the string.
	lastWidth  int          // The width of the current rune.
buf bytes.Buffer // Internal buffer used for building up values.
}
// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
msg string // Error message displayed to user
Expression string // Expression that generated a SyntaxError
Offset int // The location in the string where the error occurred
}
func (e SyntaxError) Error() string {
// In the future, it would be good to underline the specific
// location where the error occurred.
return "SyntaxError: " + e.msg
}
// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
}
//go:generate stringer -type=tokType
const (
tUnknown tokType = iota
tStar
tDot
tFilter
tFlatten
tLparen
tRparen
tLbracket
tRbracket
tLbrace
tRbrace
tOr
tPipe
tNumber
tUnquotedIdentifier
tQuotedIdentifier
tComma
tColon
tLT
tLTE
tGT
tGTE
tEQ
tNE
tJSONLiteral
tStringLiteral
tCurrent
tExpref
tAnd
tNot
tEOF
)
var basicTokens = map[rune]tokType{
'.': tDot,
'*': tStar,
',': tComma,
':': tColon,
'{': tLbrace,
'}': tRbrace,
']': tRbracket, // tLbracket not included because it could be "[]"
'(': tLparen,
')': tRparen,
'@': tCurrent,
}
// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
// When using this bitmask just be sure to shift the rune down 64 bits
// before checking against identifierStartBits.
const identifierStartBits uint64 = 576460745995190270
// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
var whiteSpace = map[rune]bool{
' ': true, '\t': true, '\n': true, '\r': true,
}
func (t token) String() string {
return fmt.Sprintf("Token{%+v, %s, %d, %d}",
t.tokenType, t.value, t.position, t.length)
}
// NewLexer creates a new JMESPath lexer.
func NewLexer() *Lexer {
lexer := Lexer{}
return &lexer
}
func (lexer *Lexer) next() rune {
if lexer.currentPos >= len(lexer.expression) {
lexer.lastWidth = 0
return eof
}
r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
lexer.lastWidth = w
lexer.currentPos += w
return r
}
func (lexer *Lexer) back() {
lexer.currentPos -= lexer.lastWidth
}
func (lexer *Lexer) peek() rune {
t := lexer.next()
lexer.back()
return t
}
// tokenize takes an expression and returns corresponding tokens.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
var tokens []token
lexer.expression = expression
lexer.currentPos = 0
lexer.lastWidth = 0
loop:
for {
r := lexer.next()
if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
t := lexer.consumeUnquotedIdentifier()
tokens = append(tokens, t)
} else if val, ok := basicTokens[r]; ok {
// Basic single char token.
t := token{
tokenType: val,
value: string(r),
position: lexer.currentPos - lexer.lastWidth,
length: 1,
}
tokens = append(tokens, t)
} else if r == '-' || (r >= '0' && r <= '9') {
t := lexer.consumeNumber()
tokens = append(tokens, t)
} else if r == '[' {
t := lexer.consumeLBracket()
tokens = append(tokens, t)
} else if r == '"' {
t, err := lexer.consumeQuotedIdentifier()
if err != nil {
return tokens, err
}
tokens = append(tokens, t)
} else if r == '\'' {
t, err := lexer.consumeRawStringLiteral()
if err != nil {
return tokens, err
}
tokens = append(tokens, t)
} else if r == '`' {
t, err := lexer.consumeLiteral()
if err != nil {
return tokens, err
}
tokens = append(tokens, t)
} else if r == '|' {
t := lexer.matchOrElse(r, '|', tOr, tPipe)
tokens = append(tokens, t)
} else if r == '<' {
t := lexer.matchOrElse(r, '=', tLTE, tLT)
tokens = append(tokens, t)
} else if r == '>' {
t := lexer.matchOrElse(r, '=', tGTE, tGT)
tokens = append(tokens, t)
} else if r == '!' {
t := lexer.matchOrElse(r, '=', tNE, tNot)
tokens = append(tokens, t)
} else if r == '=' {
t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
tokens = append(tokens, t)
} else if r == '&' {
t := lexer.matchOrElse(r, '&', tAnd, tExpref)
tokens = append(tokens, t)
} else if r == eof {
break loop
} else if _, ok := whiteSpace[r]; ok {
// Ignore whitespace
} else {
return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
}
}
tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
return tokens, nil
}
// Consume characters until the ending rune "r" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "r", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
start := lexer.currentPos
current := lexer.next()
for current != end && current != eof {
if current == '\\' && lexer.peek() != eof {
lexer.next()
}
current = lexer.next()
}
if lexer.lastWidth == 0 {
// Then we hit an EOF so we never reached the closing
// delimiter.
return "", SyntaxError{
msg: "Unclosed delimiter: " + string(end),
Expression: lexer.expression,
Offset: len(lexer.expression),
}
}
return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}
func (lexer *Lexer) consumeLiteral() (token, error) {
start := lexer.currentPos
value, err := lexer.consumeUntil('`')
if err != nil {
return token{}, err
}
value = strings.Replace(value, "\\`", "`", -1)
return token{
tokenType: tJSONLiteral,
value: value,
position: start,
length: len(value),
}, nil
}
func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
start := lexer.currentPos
currentIndex := start
current := lexer.next()
for current != '\'' && lexer.peek() != eof {
if current == '\\' && lexer.peek() == '\'' {
chunk := lexer.expression[currentIndex : lexer.currentPos-1]
lexer.buf.WriteString(chunk)
lexer.buf.WriteString("'")
lexer.next()
currentIndex = lexer.currentPos
}
current = lexer.next()
}
if lexer.lastWidth == 0 {
// Then we hit an EOF so we never reached the closing
// delimiter.
return token{}, SyntaxError{
msg: "Unclosed delimiter: '",
Expression: lexer.expression,
Offset: len(lexer.expression),
}
}
if currentIndex < lexer.currentPos {
lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
}
value := lexer.buf.String()
	// Reset the buffer so it can be reused again.
lexer.buf.Reset()
return token{
tokenType: tStringLiteral,
value: value,
position: start,
length: len(value),
}, nil
}
func (lexer *Lexer) syntaxError(msg string) SyntaxError {
return SyntaxError{
msg: msg,
Expression: lexer.expression,
Offset: lexer.currentPos - 1,
}
}
// Checks for a two char token, otherwise matches a single character
// token. This is used whenever a two char token overlaps a single
// char token, e.g. "||" -> tPipe, "|" -> tOr.
func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
start := lexer.currentPos - lexer.lastWidth
nextRune := lexer.next()
var t token
if nextRune == second {
t = token{
tokenType: matchedType,
value: string(first) + string(second),
position: start,
length: 2,
}
} else {
lexer.back()
t = token{
tokenType: singleCharType,
value: string(first),
position: start,
length: 1,
}
}
return t
}
func (lexer *Lexer) consumeLBracket() token {
	// There are three options here:
	// 1. A filter expression "[?"
	// 2. A flatten operator "[]"
	// 3. A bare lbracket "["
start := lexer.currentPos - lexer.lastWidth
nextRune := lexer.next()
var t token
if nextRune == '?' {
t = token{
tokenType: tFilter,
value: "[?",
position: start,
length: 2,
}
} else if nextRune == ']' {
t = token{
tokenType: tFlatten,
value: "[]",
position: start,
length: 2,
}
} else {
t = token{
tokenType: tLbracket,
value: "[",
position: start,
length: 1,
}
lexer.back()
}
return t
}
func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
start := lexer.currentPos
value, err := lexer.consumeUntil('"')
if err != nil {
return token{}, err
}
var decoded string
asJSON := []byte("\"" + value + "\"")
if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
return token{}, err
}
return token{
tokenType: tQuotedIdentifier,
value: decoded,
position: start - 1,
length: len(decoded),
}, nil
}
func (lexer *Lexer) consumeUnquotedIdentifier() token {
// Consume runes until we reach the end of an unquoted
// identifier.
start := lexer.currentPos - lexer.lastWidth
for {
r := lexer.next()
if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
lexer.back()
break
}
}
value := lexer.expression[start:lexer.currentPos]
return token{
tokenType: tUnquotedIdentifier,
value: value,
position: start,
length: lexer.currentPos - start,
}
}
func (lexer *Lexer) consumeNumber() token {
// Consume runes until we reach something that's not a number.
start := lexer.currentPos - lexer.lastWidth
for {
r := lexer.next()
if r < '0' || r > '9' {
lexer.back()
break
}
}
value := lexer.expression[start:lexer.currentPos]
return token{
tokenType: tNumber,
value: value,
position: start,
length: lexer.currentPos - start,
}
}
| vendor/github.com/jmespath/go-jmespath/lexer.go | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.0002542792644817382,
0.0001725399779388681,
0.00016503207734785974,
0.00017091547488234937,
0.000012862861694884486
] |
{
"id": 3,
"code_window": [
"\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n",
"\tcore \"k8s.io/client-go/testing\"\n",
")\n",
"\n",
"func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {\n",
"\taction := core.GetActionImpl{}\n",
"\taction.Verb = \"post\"\n",
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction := core.CreateActionImpl{}\n",
"\taction.Verb = \"create\"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 25
} | ## Container Specification - v1
This is the standard configuration for version 1 containers. It includes
namespaces, standard filesystem setup, a default Linux capability set, and
information about resource reservations. It also has information about any
populated environment settings for the processes running inside a container.
Along with the configuration of how a container is created, the standard also
discusses actions that can be performed on a container to manage and inspect
information about the processes running inside.
The v1 profile is meant to be able to accommodate the majority of applications
with a strong security configuration.
### System Requirements and Compatibility
Minimum requirements:
* Kernel version - 3.10 recommended; 2.6.2x minimum (with backported patches)
* Mounted cgroups with each subsystem in its own hierarchy
### Namespaces
| Flag | Enabled |
| ------------ | ------- |
| CLONE_NEWPID | 1 |
| CLONE_NEWUTS | 1 |
| CLONE_NEWIPC | 1 |
| CLONE_NEWNET | 1 |
| CLONE_NEWNS | 1 |
| CLONE_NEWUSER | 1 |
Namespaces are created for the container via the `clone` syscall.
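The shape of that request can be sketched from Go, where the standard library exposes the `clone` flags through `SysProcAttr`. The sketch below is an illustration, not libcontainer's actual implementation; the `"init"` re-exec argument is a hypothetical convention:

```go
package main

import (
	"os"
	"os/exec"
	"syscall"
)

func main() {
	// Re-exec ourselves as the container init; the "init" argument is a
	// hypothetical convention, not part of any spec.
	cmd := exec.Command("/proc/self/exe", "init")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		// Request every namespace from the table above at clone time.
		Cloneflags: syscall.CLONE_NEWPID | syscall.CLONE_NEWUTS |
			syscall.CLONE_NEWIPC | syscall.CLONE_NEWNET |
			syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER,
		// A user namespace needs uid/gid mappings before the child can do
		// anything useful; map container root to the current user.
		UidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: os.Getuid(), Size: 1}},
		GidMappings: []syscall.SysProcIDMap{{ContainerID: 0, HostID: os.Getgid(), Size: 1}},
	}
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```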
### Filesystem
A root filesystem must be provided to a container for execution. The container
will use this root filesystem (rootfs) to jail and spawn processes inside where
the binaries and system libraries are local to that directory. Any binaries
to be executed must be contained within this rootfs.
Mounts that happen inside the container are automatically cleaned up when the
container exits as the mount namespace is destroyed and the kernel will
unmount all the mounts that were set up within that namespace.
For a container to execute properly there are certain filesystems that
are required to be mounted within the rootfs that the runtime will set up.
| Path | Type | Flags | Data |
| ----------- | ------ | -------------------------------------- | ---------------------------------------- |
| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 |
| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k |
| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 |
| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | |
After a container's filesystems are mounted within the newly created
mount namespace `/dev` will need to be populated with a set of device nodes.
It is expected that a rootfs does not need to have any device nodes specified
for `/dev` within the rootfs as the container will set up the correct devices
that are required for executing a container's process.
| Path | Mode | Access |
| ------------ | ---- | ---------- |
| /dev/null | 0666 | rwm |
| /dev/zero | 0666 | rwm |
| /dev/full | 0666 | rwm |
| /dev/tty | 0666 | rwm |
| /dev/random | 0666 | rwm |
| /dev/urandom | 0666 | rwm |
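A runtime can create these nodes with `mknod(2)` once the tmpfs is mounted on `/dev`. The Go sketch below is a simplified illustration: the major/minor numbers are the conventional Linux ones for these devices, and error aggregation and ownership handling are elided.

```go
package devices

import "syscall"

// node is one of the default character devices from the table above.
type node struct {
	path         string
	major, minor uint32
}

var defaultNodes = []node{
	{"/dev/null", 1, 3}, {"/dev/zero", 1, 5}, {"/dev/full", 1, 7},
	{"/dev/tty", 5, 0}, {"/dev/random", 1, 8}, {"/dev/urandom", 1, 9},
}

// mkdev encodes a major/minor pair into a Linux dev_t.
func mkdev(major, minor uint32) int {
	return int((minor & 0xff) | (major << 8) | ((minor &^ 0xff) << 12))
}

// CreateDefaultDevices mknods the default device set inside the
// container's freshly mounted /dev.
func CreateDefaultDevices() error {
	for _, n := range defaultNodes {
		if err := syscall.Mknod(n.path, syscall.S_IFCHR|0o666, mkdev(n.major, n.minor)); err != nil {
			return err
		}
	}
	return nil
}
```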
**ptmx**
`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within
the container.
The use of a pseudo TTY is optional within a container, and a container should
support both modes. If a pseudo TTY is provided to the container, `/dev/console`
will need to be set up by bind mounting the console in `/dev/` after it has been
populated and mounted in tmpfs.
| Source | Destination | UID GID | Mode | Type |
| --------------- | ------------ | ------- | ---- | ---- |
| *pty host path* | /dev/console | 0 0 | 0600 | bind |
After `/dev/null` has been set up, we check for any external links between
the container's io (STDIN, STDOUT, STDERR). If the container's io is pointing
to `/dev/null` outside the container, we close and `dup2` the `/dev/null`
that is local to the container's rootfs.
After the container has `/proc` mounted, a few standard symlinks are set up
within `/dev/` for the io.
| Source | Destination |
| --------------- | ----------- |
| /proc/self/fd | /dev/fd |
| /proc/self/fd/0 | /dev/stdin |
| /proc/self/fd/1 | /dev/stdout |
| /proc/self/fd/2 | /dev/stderr |
A `pivot_root` is used to change the root for the process, effectively
jailing the process inside the rootfs.
```c
put_old = mkdir(...);
pivot_root(rootfs, put_old);
chdir("/");
unmount(put_old, MS_DETACH);
rmdir(put_old);
```
For containers running with a rootfs inside `ramfs`, a `MS_MOVE` combined
with a `chroot` is required, as `pivot_root` is not supported in `ramfs`.
```c
mount(rootfs, "/", NULL, MS_MOVE, NULL);
chroot(".");
chdir("/");
```
The `umask` is set back to `0022` after the filesystem setup has been completed.
### Resources
Cgroups are used to handle resource allocation for containers. This includes
system resources like cpu, memory, and device access.
| Subsystem | Enabled |
| ---------- | ------- |
| devices | 1 |
| memory | 1 |
| cpu | 1 |
| cpuacct | 1 |
| cpuset | 1 |
| blkio | 1 |
| perf_event | 1 |
| freezer | 1 |
| hugetlb | 1 |
| pids | 1 |
All cgroup subsystems are joined so that statistics can be collected from
each of the subsystems. Freezer does not expose any stats but is joined
so that containers can be paused and resumed.
The parent process of the container's init must place the init pid inside
the correct cgroups before the initialization begins. This is done so
that no processes or threads escape the cgroups. This sync is done via a pipe
(specified in the runtime section below) on which the container's init process
will block, waiting for the parent to finish setup.
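A minimal sketch of that placement step in Go, assuming cgroup v1 hierarchies mounted under `/sys/fs/cgroup` and a pre-created per-container directory (the `mycontainer` name is hypothetical; `cpuset` additionally requires `cpuset.cpus` and `cpuset.mems` to be populated before a pid can be written):

```go
package cgroups

import (
	"fmt"
	"os"
	"path/filepath"
)

var subsystems = []string{
	"devices", "memory", "cpu", "cpuacct", "cpuset",
	"blkio", "perf_event", "freezer", "hugetlb", "pids",
}

// PlaceInCgroups writes pid into cgroup.procs for every subsystem before
// the init process is unblocked, so no thread can escape the hierarchy.
func PlaceInCgroups(pid int) error {
	for _, sub := range subsystems {
		procs := filepath.Join("/sys/fs/cgroup", sub, "mycontainer", "cgroup.procs")
		if err := os.WriteFile(procs, []byte(fmt.Sprintf("%d\n", pid)), 0o644); err != nil {
			return fmt.Errorf("join %s: %w", sub, err)
		}
	}
	return nil
}
```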
### IntelRdt
Intel platforms with newer Xeon CPUs support Intel Resource Director Technology
(RDT). Cache Allocation Technology (CAT) is a sub-feature of RDT, which
currently supports L3 cache resource allocation.
This feature provides a way for the software to restrict cache allocation to a
defined 'subset' of L3 cache which may be overlapping with other 'subsets'.
The different subsets are identified by class of service (CLOS) and each CLOS
has a capacity bitmask (CBM).
It can be used to handle L3 cache resource allocation for containers if
hardware and kernel support Intel RDT/CAT.
In Linux 4.10 kernel or newer, the interface is defined and exposed via
"resource control" filesystem, which is a "cgroup-like" interface.
Compared with cgroups, it has a similar process management lifecycle and
similar interfaces in a container, but unlike the cgroups hierarchy, it has a
single-level filesystem layout.
Intel RDT "resource control" filesystem hierarchy:
```
mount -t resctrl resctrl /sys/fs/resctrl
tree /sys/fs/resctrl
/sys/fs/resctrl/
|-- info
| |-- L3
| |-- cbm_mask
| |-- min_cbm_bits
| |-- num_closids
|-- cpus
|-- schemata
|-- tasks
|-- <container_id>
|-- cpus
|-- schemata
|-- tasks
```
For runc, we can make use of `tasks` and `schemata` configuration for L3 cache
resource constraints.
The file `tasks` has a list of tasks that belong to this group (e.g., the
"<container_id>" group). Tasks can be added to a group by writing the task ID
to the "tasks" file (which will automatically remove them from the previous
group to which they belonged). New tasks created by fork(2) and clone(2) are
added to the same group as their parent. If a pid is not in any sub group, it
is in the root group.
The file `schemata` has allocation masks/values for L3 cache on each socket,
which contains L3 cache id and capacity bitmask (CBM).
```
Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
```
For example, on a two-socket machine, L3's schema line could be `L3:0=ff;1=c0`,
which means L3 cache id 0's CBM is 0xff, and L3 cache id 1's CBM is 0xc0.
A valid L3 cache CBM is a *contiguous* set of bits, and the number of bits that
can be set is limited by the max CBM length, which varies among supported Intel
Xeon platforms. In the Intel RDT "resource control" filesystem layout, the CBM
in a group should be a subset of the CBM in root; the kernel will check that it
is valid when writing. For example, 0xfffff in root indicates that the max CBM
length is 20 bits, which maps to the entire L3 cache capacity. Some valid CBM
values to set in a group: 0xf, 0xf0, 0x3ff, 0x1f00, etc.
For more information about Intel RDT/CAT kernel interface:
https://www.kernel.org/doc/Documentation/x86/intel_rdt_ui.txt
An example for runc:
```
Consider a two-socket machine with two L3 caches where the default CBM is
0xfffff and the max CBM length is 20 bits. With this configuration, tasks
inside the container only have access to the "upper" 80% of L3 cache id 0 and
the "lower" 50% L3 cache id 1:
"linux": {
"intelRdt": {
"l3CacheSchema": "L3:0=ffff0;1=3ff"
}
}
```
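In practice the runtime only needs two writes per container: one to `schemata` and one to `tasks`. A hedged Go sketch of that sequence, assuming the resctrl filesystem is already mounted as shown above and cleanup is handled elsewhere:

```go
package intelrdt

import (
	"fmt"
	"os"
	"path/filepath"
)

// ApplyL3Schema creates a resctrl group for the container, restricts it to
// the given CBMs (e.g. "L3:0=ffff0;1=3ff"), and moves the init pid into it.
func ApplyL3Schema(containerID, schema string, pid int) error {
	group := filepath.Join("/sys/fs/resctrl", containerID)
	if err := os.MkdirAll(group, 0o755); err != nil {
		return err
	}
	// The kernel validates the CBMs on write and rejects invalid masks.
	if err := os.WriteFile(filepath.Join(group, "schemata"), []byte(schema+"\n"), 0o644); err != nil {
		return err
	}
	// Adding the pid to "tasks" moves it (and its future children) into the group.
	return os.WriteFile(filepath.Join(group, "tasks"), []byte(fmt.Sprintf("%d", pid)), 0o644)
}
```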
### Security
The standard set of Linux capabilities that are set in a container
provide a good default for security and flexibility for the applications.
| Capability | Enabled |
| -------------------- | ------- |
| CAP_NET_RAW | 1 |
| CAP_NET_BIND_SERVICE | 1 |
| CAP_AUDIT_READ | 1 |
| CAP_AUDIT_WRITE | 1 |
| CAP_DAC_OVERRIDE | 1 |
| CAP_SETFCAP | 1 |
| CAP_SETPCAP | 1 |
| CAP_SETGID | 1 |
| CAP_SETUID | 1 |
| CAP_MKNOD | 1 |
| CAP_CHOWN | 1 |
| CAP_FOWNER | 1 |
| CAP_FSETID | 1 |
| CAP_KILL | 1 |
| CAP_SYS_CHROOT | 1 |
| CAP_NET_BROADCAST | 0 |
| CAP_SYS_MODULE | 0 |
| CAP_SYS_RAWIO | 0 |
| CAP_SYS_PACCT | 0 |
| CAP_SYS_ADMIN | 0 |
| CAP_SYS_NICE | 0 |
| CAP_SYS_RESOURCE | 0 |
| CAP_SYS_TIME | 0 |
| CAP_SYS_TTY_CONFIG | 0 |
| CAP_AUDIT_CONTROL | 0 |
| CAP_MAC_OVERRIDE | 0 |
| CAP_MAC_ADMIN | 0 |
| CAP_NET_ADMIN | 0 |
| CAP_SYSLOG | 0 |
| CAP_DAC_READ_SEARCH | 0 |
| CAP_LINUX_IMMUTABLE | 0 |
| CAP_IPC_LOCK | 0 |
| CAP_IPC_OWNER | 0 |
| CAP_SYS_PTRACE | 0 |
| CAP_SYS_BOOT | 0 |
| CAP_LEASE | 0 |
| CAP_WAKE_ALARM | 0 |
| CAP_BLOCK_SUSPEND | 0 |
Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
and [selinux](http://selinuxproject.org/page/Main_Page) can be used with
the containers. A container should support setting an apparmor profile or
selinux process and mount labels if provided in the configuration.
Standard apparmor profile:
```c
#include <tunables/global>
profile <profile_name> flags=(attach_disconnected,mediate_deleted) {
#include <abstractions/base>
network,
capability,
file,
umount,
deny @{PROC}/sys/fs/** wklx,
deny @{PROC}/sysrq-trigger rwklx,
deny @{PROC}/mem rwklx,
deny @{PROC}/kmem rwklx,
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
deny @{PROC}/sys/kernel/*/** wklx,
deny mount,
deny /sys/[^f]*/** wklx,
deny /sys/f[^s]*/** wklx,
deny /sys/fs/[^c]*/** wklx,
deny /sys/fs/c[^g]*/** wklx,
deny /sys/fs/cg[^r]*/** wklx,
deny /sys/firmware/efi/efivars/** rwklx,
deny /sys/kernel/security/** rwklx,
}
```
*TODO: seccomp work is being done to find a good default config*
### Runtime and Init Process
During container creation the parent process needs to talk to the container's init
process and have a form of synchronization. This is accomplished by creating
a pipe that is passed to the container's init. When the init process first spawns
it will block on its side of the pipe until the parent closes its side. This
allows the parent time to place the new process inside a cgroup hierarchy
and/or write any uid/gid mappings required for user namespaces.
The pipe is passed to the init process via FD 3.
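The shape of that handshake can be sketched in Go: the parent passes the read end of a pipe as FD 3 (`ExtraFiles[0]` maps to descriptor 3), and the init side blocks until EOF. This illustrates the mechanism rather than reproducing libcontainer's exact code; the `"init"` re-exec argument is a hypothetical convention:

```go
package main

import (
	"io"
	"os"
	"os/exec"
)

// parentStart launches init with the read end of a sync pipe on FD 3,
// performs setup, then closes the write end to release the child.
func parentStart() error {
	r, w, err := os.Pipe()
	if err != nil {
		return err
	}
	cmd := exec.Command("/proc/self/exe", "init") // hypothetical re-exec
	cmd.ExtraFiles = []*os.File{r}                // ExtraFiles[0] becomes FD 3
	if err := cmd.Start(); err != nil {
		return err
	}
	r.Close() // the parent only needs the write end
	// ... place cmd.Process.Pid into cgroups, write uid/gid mappings ...
	return w.Close() // EOF on the child's FD 3 unblocks init
}

// childWait is the first thing the container's init does: block on FD 3
// until the parent signals that setup is complete.
func childWait() {
	sync := os.NewFile(3, "sync-pipe")
	io.Copy(io.Discard, sync) // returns at EOF
	sync.Close()
}

func main() {
	if len(os.Args) > 1 && os.Args[1] == "init" {
		childWait()
		// ... continue with rootfs setup and exec of the user process ...
		return
	}
	if err := parentStart(); err != nil {
		panic(err)
	}
}
```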
The application consuming libcontainer should be compiled statically. libcontainer
does not define any init process and the arguments provided are used to `exec` the
process inside the application. There should be no long running init within the
container spec.
If a pseudo TTY is provided to a container it will open and `dup2` the console
as the container's STDIN, STDOUT, STDERR as well as mounting the console
as `/dev/console`.
An extra set of mounts is provided to a container and set up for use. A
container's rootfs can contain some non-portable files that can cause side
effects during execution of a process. These files are usually created and
populated with container-specific information by the runtime.
**Extra runtime files:**
* /etc/hosts
* /etc/resolv.conf
* /etc/hostname
* /etc/localtime
#### Defaults
There are a few defaults that can be overridden by users; when omitted,
these apply to processes within a container.
| Type | Value |
| ------------------- | ------------------------------ |
| Parent Death Signal | SIGKILL |
| UID | 0 |
| GID | 0 |
| GROUPS | 0, NULL |
| CWD | "/" |
| $HOME | Current user's home dir or "/" |
| Readonly rootfs | false |
| Pseudo TTY | false |
## Actions
After a container is created there is a standard set of actions that can
be done to the container. These actions are part of the public API for
a container.
| Action | Description |
| -------------- | ------------------------------------------------------------------ |
| Get processes | Return all the pids for processes running inside a container |
| Get Stats | Return resource statistics for the container as a whole |
| Wait | Waits on the container's init process ( pid 1 ) |
| Wait Process | Wait on any of the container's processes returning the exit status |
| Destroy | Kill the container's init process and remove any filesystem state |
| Signal | Send a signal to the container's init process |
| Signal Process | Send a signal to any of the container's processes |
| Pause | Pause all processes inside the container |
| Resume | Resume all processes inside the container if paused |
| Exec | Execute a new process inside of the container ( requires setns ) |
| Set | Setup configs of the container after it's created |
### Execute a new process inside of a running container
A user can execute a new process inside of a running container. Any binaries to be
executed must be accessible within the container's rootfs.
The started process will run inside the container's rootfs. Any changes
made by the process to the container's filesystem will persist after the
process has finished executing.
The started process will join all the container's existing namespaces. When the
container is paused, the process will also be paused and will resume when
the container is unpaused. The started process will only run when the container's
primary process (PID 1) is running, and will not be restarted when the container
is restarted.
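libcontainer implements this join with a low-level `setns` helper; as a self-contained illustration, a sidecar tool could achieve a similar effect by delegating to `nsenter(1)`, which joins the namespaces of the container's init before exec'ing the command. This sketch assumes nsenter is installed and that the caller already knows the init pid:

```go
package attach

import (
	"os"
	"os/exec"
	"strconv"
)

// ExecInContainer runs argv inside the namespaces of the container's init
// process (initPid is its pid on the host) by shelling out to nsenter(1).
func ExecInContainer(initPid int, argv []string) error {
	args := append([]string{
		"--target", strconv.Itoa(initPid),
		"--mount", "--uts", "--ipc", "--net", "--pid",
	}, argv...)
	cmd := exec.Command("nsenter", args...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	return cmd.Run()
}
```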
#### Planned additions
The started process will have its own cgroups nested inside the container's
cgroups. This is used for process tracking and optionally resource allocation
handling for the new process. The freezer cgroup is required; the rest of the
cgroups are optional. The process executor must place its pid inside the correct
cgroups before starting the process. This is done so that no child processes or
threads can escape the cgroups.
When the process is stopped, the process executor will try (in a best-effort way)
to stop all its children and remove the sub-cgroups.
| vendor/github.com/opencontainers/runc/libcontainer/SPEC.md | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00017794404993765056,
0.00016728848277125508,
0.00015965261263772845,
0.00016606965800747275,
0.000005163365131011233
] |
{
"id": 4,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Name = eviction.Name\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 30
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"k8s.io/apimachinery/pkg/runtime/schema"
core "k8s.io/client-go/testing"
policy "k8s.io/kubernetes/pkg/apis/policy"
)
func (c *FakeEvictions) Evict(eviction *policy.Eviction) error {
action := core.GetActionImpl{}
action.Verb = "post"
action.Namespace = c.ns
action.Resource = schema.GroupVersionResource{Group: "", Version: "", Resource: "pods"}
action.Subresource = "eviction"
_, err := c.Fake.Invokes(action, eviction)
return err
}
| pkg/client/clientset_generated/internalclientset/typed/policy/internalversion/fake/fake_eviction_expansion.go | 1 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.9971231818199158,
0.2587595582008362,
0.00017826569091994315,
0.01886840909719467,
0.4265674650669098
] |
{
"id": 4,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Name = eviction.Name\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/mount.proto
package types
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
type Mount struct {
// Type defines the nature of the mount.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// Target path in container
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// Options specifies zero or more fstab style mount options.
Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
func init() {
proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
}
func (m *Mount) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Type) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
if len(m.Source) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
i += copy(dAtA[i:], m.Source)
}
if len(m.Target) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Target)))
i += copy(dAtA[i:], m.Target)
}
if len(m.Options) > 0 {
for _, s := range m.Options {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Mount) Size() (n int) {
var l int
_ = l
l = len(m.Type)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
l = len(m.Source)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
l = len(m.Target)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
if len(m.Options) > 0 {
for _, s := range m.Options {
l = len(s)
n += 1 + l + sovMount(uint64(l))
}
}
return n
}
func sovMount(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozMount(x uint64) (n int) {
return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Mount) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Mount{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
`}`,
}, "")
return s
}
func valueToStringMount(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Mount) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Mount: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Source = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Target = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Options = append(m.Options, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMount(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMount
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMount(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthMount
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipMount(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
}
var fileDescriptorMount = []byte{
// 202 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
}
| vendor/github.com/containerd/containerd/api/types/mount.pb.go | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.0009318708325736225,
0.0002037181257037446,
0.0001625007134862244,
0.0001754631957737729,
0.00012221552606206387
] |
{
"id": 4,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Name = eviction.Name\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 30
} | 1.0
| test/images/redis/VERSION | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.00017800649220589548,
0.00017800649220589548,
0.00017800649220589548,
0.00017800649220589548,
0
] |
{
"id": 4,
"code_window": [
"\taction.Namespace = c.ns\n",
"\taction.Resource = schema.GroupVersionResource{Group: \"\", Version: \"v1\", Resource: \"pods\"}\n",
"\taction.Subresource = \"eviction\"\n",
"\taction.Name = eviction.Name\n",
"\t_, err := c.Fake.Invokes(action, eviction)\n",
"\treturn err\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\taction.Object = eviction\n",
"\n"
],
"file_path": "staging/src/k8s.io/client-go/kubernetes/typed/policy/v1beta1/fake/fake_eviction_expansion.go",
"type": "replace",
"edit_start_line_idx": 30
} | // Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
)
var (
timeMu sync.Mutex
lasttime uint64 // last time we returned
clockSeq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds using the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// clock sequence as well as adjusting the clock sequence as needed. An error
// is returned if the current time cannot be determined.
func GetTime() (Time, uint16, error) {
defer timeMu.Unlock()
timeMu.Lock()
return getTime()
}
func getTime() (Time, uint16, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clockSeq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), clockSeq, nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer timeMu.Unlock()
timeMu.Lock()
return clockSequence()
}
func clockSequence() int {
if clockSeq == 0 {
setClockSequence(-1)
}
return int(clockSeq & 0x3fff)
}
// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer timeMu.Unlock()
timeMu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
	oldSeq := clockSeq
	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
	if oldSeq != clockSeq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs.
func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time)
}
// ClockSequence returns the clock sequence encoded in uuid.
// The clock sequence is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) ClockSequence() int {
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
}
| vendor/github.com/google/uuid/time.go | 0 | https://github.com/kubernetes/kubernetes/commit/869adf8bd7e9ec7d8bba8317cb987d760d83833d | [
0.0001795295102056116,
0.0001724928879411891,
0.00016603845870122313,
0.000170923289260827,
0.0000038915654840820935
] |
{
"id": 0,
"code_window": [
"\n",
"func (self *TagsController) checkout(tag *models.Tag) error {\n",
"\tself.c.LogAction(self.c.Tr.Actions.CheckoutTag)\n",
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.Name, types.CheckoutRefOptions{}); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\treturn self.c.PushContext(self.c.Contexts().Branches)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.FullRefName(), types.CheckoutRefOptions{}); err != nil {\n"
],
"file_path": "pkg/gui/controllers/tags_controller.go",
"type": "replace",
"edit_start_line_idx": 85
} | package controllers
import (
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/gui/context"
"github.com/jesseduffield/lazygit/pkg/gui/types"
"github.com/jesseduffield/lazygit/pkg/utils"
)
type TagsController struct {
baseController
c *ControllerCommon
}
var _ types.IController = &TagsController{}
func NewTagsController(
common *ControllerCommon,
) *TagsController {
return &TagsController{
baseController: baseController{},
c: common,
}
}
func (self *TagsController) GetKeybindings(opts types.KeybindingsOpts) []*types.Binding {
bindings := []*types.Binding{
{
Key: opts.GetKey(opts.Config.Universal.Select),
Handler: self.withSelectedTag(self.checkout),
Description: self.c.Tr.Checkout,
},
{
Key: opts.GetKey(opts.Config.Universal.Remove),
Handler: self.withSelectedTag(self.delete),
Description: self.c.Tr.ViewDeleteOptions,
OpensMenu: true,
},
{
Key: opts.GetKey(opts.Config.Branches.PushTag),
Handler: self.withSelectedTag(self.push),
Description: self.c.Tr.PushTag,
},
{
Key: opts.GetKey(opts.Config.Universal.New),
Handler: self.create,
Description: self.c.Tr.CreateTag,
},
{
Key: opts.GetKey(opts.Config.Commits.ViewResetOptions),
Handler: self.withSelectedTag(self.createResetMenu),
Description: self.c.Tr.ViewResetOptions,
OpensMenu: true,
},
}
return bindings
}
func (self *TagsController) GetOnRenderToMain() func() error {
return func() error {
return self.c.Helpers().Diff.WithDiffModeCheck(func() error {
var task types.UpdateTask
tag := self.context().GetSelected()
if tag == nil {
task = types.NewRenderStringTask("No tags")
} else {
cmdObj := self.c.Git().Branch.GetGraphCmdObj(tag.FullRefName())
task = types.NewRunCommandTask(cmdObj.GetCmd())
}
return self.c.RenderToMainViews(types.RefreshMainOpts{
Pair: self.c.MainViewPairs().Normal,
Main: &types.ViewUpdateOpts{
Title: "Tag",
Task: task,
},
})
})
}
}
func (self *TagsController) checkout(tag *models.Tag) error {
self.c.LogAction(self.c.Tr.Actions.CheckoutTag)
if err := self.c.Helpers().Refs.CheckoutRef(tag.Name, types.CheckoutRefOptions{}); err != nil {
return err
}
return self.c.PushContext(self.c.Contexts().Branches)
}
func (self *TagsController) localDelete(tag *models.Tag) error {
return self.c.WithWaitingStatus(self.c.Tr.DeletingStatus, func(gocui.Task) error {
self.c.LogAction(self.c.Tr.Actions.DeleteLocalTag)
err := self.c.Git().Tag.LocalDelete(tag.Name)
_ = self.c.Refresh(types.RefreshOptions{Mode: types.ASYNC, Scope: []types.RefreshableView{types.COMMITS, types.TAGS}})
return err
})
}
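// remoteDelete prompts for a remote (suggesting "origin"), asks for
// confirmation, and deletes the tag from that remote while showing an inline
// "deleting" status against the tag.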
func (self *TagsController) remoteDelete(tag *models.Tag) error {
title := utils.ResolvePlaceholderString(
self.c.Tr.SelectRemoteTagUpstream,
map[string]string{
"tagName": tag.Name,
},
)
return self.c.Prompt(types.PromptOpts{
Title: title,
InitialContent: "origin",
FindSuggestionsFunc: self.c.Helpers().Suggestions.GetRemoteSuggestionsFunc(),
HandleConfirm: func(upstream string) error {
confirmTitle := utils.ResolvePlaceholderString(
self.c.Tr.DeleteTagTitle,
map[string]string{
"tagName": tag.Name,
},
)
confirmPrompt := utils.ResolvePlaceholderString(
self.c.Tr.DeleteRemoteTagPrompt,
map[string]string{
"tagName": tag.Name,
"upstream": upstream,
},
)
return self.c.Confirm(types.ConfirmOpts{
Title: confirmTitle,
Prompt: confirmPrompt,
HandleConfirm: func() error {
return self.c.WithInlineStatus(tag, types.ItemOperationDeleting, context.TAGS_CONTEXT_KEY, func(task gocui.Task) error {
self.c.LogAction(self.c.Tr.Actions.DeleteRemoteTag)
if err := self.c.Git().Remote.DeleteRemoteTag(task, upstream, tag.Name); err != nil {
return err
}
self.c.Toast(self.c.Tr.RemoteTagDeletedMessage)
return self.c.Refresh(types.RefreshOptions{Mode: types.ASYNC, Scope: []types.RefreshableView{types.COMMITS, types.TAGS}})
})
},
})
},
})
}
func (self *TagsController) delete(tag *models.Tag) error {
menuTitle := utils.ResolvePlaceholderString(
self.c.Tr.DeleteTagTitle,
map[string]string{
"tagName": tag.Name,
},
)
menuItems := []*types.MenuItem{
{
Label: self.c.Tr.DeleteLocalTag,
Key: 'c',
OnPress: func() error {
return self.localDelete(tag)
},
},
{
Label: self.c.Tr.DeleteRemoteTag,
Key: 'r',
OpensMenu: true,
OnPress: func() error {
return self.remoteDelete(tag)
},
},
}
return self.c.Menu(types.CreateMenuOptions{
Title: menuTitle,
Items: menuItems,
})
}
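// push prompts for a remote (suggesting "origin") and pushes the tag to it,
// showing an inline "pushing" status while the command runs.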
func (self *TagsController) push(tag *models.Tag) error {
title := utils.ResolvePlaceholderString(
self.c.Tr.PushTagTitle,
map[string]string{
"tagName": tag.Name,
},
)
return self.c.Prompt(types.PromptOpts{
Title: title,
InitialContent: "origin",
FindSuggestionsFunc: self.c.Helpers().Suggestions.GetRemoteSuggestionsFunc(),
HandleConfirm: func(response string) error {
return self.c.WithInlineStatus(tag, types.ItemOperationPushing, context.TAGS_CONTEXT_KEY, func(task gocui.Task) error {
self.c.LogAction(self.c.Tr.Actions.PushTag)
err := self.c.Git().Tag.Push(task, response, tag.Name)
// Render again to remove the inline status:
self.c.OnUIThread(func() error {
_ = self.c.Contexts().Tags.HandleRender()
return nil
})
return err
})
},
})
}
func (self *TagsController) createResetMenu(tag *models.Tag) error {
return self.c.Helpers().Refs.CreateGitResetMenu(tag.Name)
}
func (self *TagsController) create() error {
// leaving commit SHA blank so that we're just creating the tag for the current commit
return self.c.Helpers().Tags.OpenCreateTagPrompt("", func() { self.context().SetSelectedLineIdx(0) })
}
func (self *TagsController) withSelectedTag(f func(tag *models.Tag) error) func() error {
return func() error {
tag := self.context().GetSelected()
if tag == nil {
return nil
}
return f(tag)
}
}
func (self *TagsController) Context() types.Context {
return self.context()
}
func (self *TagsController) context() *context.TagsContext {
return self.c.Contexts().Tags
}
| pkg/gui/controllers/tags_controller.go | 1 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.9981738328933716,
0.09250658005475998,
0.00016672619676683098,
0.005191809497773647,
0.2722414433956146
] |
{
"id": 0,
"code_window": [
"\n",
"func (self *TagsController) checkout(tag *models.Tag) error {\n",
"\tself.c.LogAction(self.c.Tr.Actions.CheckoutTag)\n",
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.Name, types.CheckoutRefOptions{}); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\treturn self.c.PushContext(self.c.Contexts().Branches)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.FullRefName(), types.CheckoutRefOptions{}); err != nil {\n"
],
"file_path": "pkg/gui/controllers/tags_controller.go",
"type": "replace",
"edit_start_line_idx": 85
} | // mkerrors.sh -Wall -Werror -static -I/tmp/mips64le/include
// Code generated by the command above; see README.md. DO NOT EDIT.
//go:build mips64le && linux
// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -Wall -Werror -static -I/tmp/mips64le/include _const.go
package unix
import "syscall"
const (
B1000000 = 0x1008
B115200 = 0x1002
B1152000 = 0x1009
B1500000 = 0x100a
B2000000 = 0x100b
B230400 = 0x1003
B2500000 = 0x100c
B3000000 = 0x100d
B3500000 = 0x100e
B4000000 = 0x100f
B460800 = 0x1004
B500000 = 0x1005
B57600 = 0x1001
B576000 = 0x1006
B921600 = 0x1007
BLKALIGNOFF = 0x2000127a
BLKBSZGET = 0x40081270
BLKBSZSET = 0x80081271
BLKDISCARD = 0x20001277
BLKDISCARDZEROES = 0x2000127c
BLKFLSBUF = 0x20001261
BLKFRAGET = 0x20001265
BLKFRASET = 0x20001264
BLKGETDISKSEQ = 0x40081280
BLKGETSIZE = 0x20001260
BLKGETSIZE64 = 0x40081272
BLKIOMIN = 0x20001278
BLKIOOPT = 0x20001279
BLKPBSZGET = 0x2000127b
BLKRAGET = 0x20001263
BLKRASET = 0x20001262
BLKROGET = 0x2000125e
BLKROSET = 0x2000125d
BLKROTATIONAL = 0x2000127e
BLKRRPART = 0x2000125f
BLKSECDISCARD = 0x2000127d
BLKSECTGET = 0x20001267
BLKSECTSET = 0x20001266
BLKSSZGET = 0x20001268
BLKZEROOUT = 0x2000127f
BOTHER = 0x1000
BS1 = 0x2000
BSDLY = 0x2000
CBAUD = 0x100f
CBAUDEX = 0x1000
CIBAUD = 0x100f0000
CLOCAL = 0x800
CR1 = 0x200
CR2 = 0x400
CR3 = 0x600
CRDLY = 0x600
CREAD = 0x80
CS6 = 0x10
CS7 = 0x20
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
ECHOE = 0x10
ECHOK = 0x20
ECHOKE = 0x800
ECHONL = 0x40
ECHOPRT = 0x400
EFD_CLOEXEC = 0x80000
EFD_NONBLOCK = 0x80
EPOLL_CLOEXEC = 0x80000
EXTPROC = 0x10000
FF1 = 0x8000
FFDLY = 0x8000
FICLONE = 0x80049409
FICLONERANGE = 0x8020940d
FLUSHO = 0x2000
FS_IOC_ENABLE_VERITY = 0x80806685
FS_IOC_GETFLAGS = 0x40086601
FS_IOC_GET_ENCRYPTION_NONCE = 0x4010661b
FS_IOC_GET_ENCRYPTION_POLICY = 0x800c6615
FS_IOC_GET_ENCRYPTION_PWSALT = 0x80106614
FS_IOC_SETFLAGS = 0x80086602
FS_IOC_SET_ENCRYPTION_POLICY = 0x400c6613
F_GETLK = 0xe
F_GETLK64 = 0xe
F_GETOWN = 0x17
F_RDLCK = 0x0
F_SETLK = 0x6
F_SETLK64 = 0x6
F_SETLKW = 0x7
F_SETLKW64 = 0x7
F_SETOWN = 0x18
F_UNLCK = 0x2
F_WRLCK = 0x1
HIDIOCGRAWINFO = 0x40084803
HIDIOCGRDESC = 0x50044802
HIDIOCGRDESCSIZE = 0x40044801
HUPCL = 0x400
ICANON = 0x2
IEXTEN = 0x100
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
IXON = 0x400
MAP_ANON = 0x800
MAP_ANONYMOUS = 0x800
MAP_DENYWRITE = 0x2000
MAP_EXECUTABLE = 0x4000
MAP_GROWSDOWN = 0x1000
MAP_HUGETLB = 0x80000
MAP_LOCKED = 0x8000
MAP_NONBLOCK = 0x20000
MAP_NORESERVE = 0x400
MAP_POPULATE = 0x10000
MAP_RENAME = 0x800
MAP_STACK = 0x40000
MCL_CURRENT = 0x1
MCL_FUTURE = 0x2
MCL_ONFAULT = 0x4
MEMERASE = 0x80084d02
MEMERASE64 = 0x80104d14
MEMGETBADBLOCK = 0x80084d0b
MEMGETINFO = 0x40204d01
MEMGETOOBSEL = 0x40c84d0a
MEMGETREGIONCOUNT = 0x40044d07
MEMISLOCKED = 0x40084d17
MEMLOCK = 0x80084d05
MEMREAD = 0xc0404d1a
MEMREADOOB = 0xc0104d04
MEMSETBADBLOCK = 0x80084d0c
MEMUNLOCK = 0x80084d06
MEMWRITEOOB = 0xc0104d03
MTDFILEMODE = 0x20004d13
NFDBITS = 0x40
NLDLY = 0x100
NOFLSH = 0x80
NS_GET_NSTYPE = 0x2000b703
NS_GET_OWNER_UID = 0x2000b704
NS_GET_PARENT = 0x2000b702
NS_GET_USERNS = 0x2000b701
OLCUC = 0x2
ONLCR = 0x4
OTPERASE = 0x800c4d19
OTPGETREGIONCOUNT = 0x80044d0e
OTPGETREGIONINFO = 0x800c4d0f
OTPLOCK = 0x400c4d10
OTPSELECT = 0x40044d0d
O_APPEND = 0x8
O_ASYNC = 0x1000
O_CLOEXEC = 0x80000
O_CREAT = 0x100
O_DIRECT = 0x8000
O_DIRECTORY = 0x10000
O_DSYNC = 0x10
O_EXCL = 0x400
O_FSYNC = 0x4010
O_LARGEFILE = 0x0
O_NDELAY = 0x80
O_NOATIME = 0x40000
O_NOCTTY = 0x800
O_NOFOLLOW = 0x20000
O_NONBLOCK = 0x80
O_PATH = 0x200000
O_RSYNC = 0x4010
O_SYNC = 0x4010
O_TMPFILE = 0x410000
O_TRUNC = 0x200
PARENB = 0x100
PARODD = 0x200
PENDIN = 0x4000
PERF_EVENT_IOC_DISABLE = 0x20002401
PERF_EVENT_IOC_ENABLE = 0x20002400
PERF_EVENT_IOC_ID = 0x40082407
PERF_EVENT_IOC_MODIFY_ATTRIBUTES = 0x8008240b
PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409
PERF_EVENT_IOC_PERIOD = 0x80082404
PERF_EVENT_IOC_QUERY_BPF = 0xc008240a
PERF_EVENT_IOC_REFRESH = 0x20002402
PERF_EVENT_IOC_RESET = 0x20002403
PERF_EVENT_IOC_SET_BPF = 0x80042408
PERF_EVENT_IOC_SET_FILTER = 0x80082406
PERF_EVENT_IOC_SET_OUTPUT = 0x20002405
PPPIOCATTACH = 0x8004743d
PPPIOCATTCHAN = 0x80047438
PPPIOCBRIDGECHAN = 0x80047435
PPPIOCCONNECT = 0x8004743a
PPPIOCDETACH = 0x8004743c
PPPIOCDISCONN = 0x20007439
PPPIOCGASYNCMAP = 0x40047458
PPPIOCGCHAN = 0x40047437
PPPIOCGDEBUG = 0x40047441
PPPIOCGFLAGS = 0x4004745a
PPPIOCGIDLE = 0x4010743f
PPPIOCGIDLE32 = 0x4008743f
PPPIOCGIDLE64 = 0x4010743f
PPPIOCGL2TPSTATS = 0x40487436
PPPIOCGMRU = 0x40047453
PPPIOCGRASYNCMAP = 0x40047455
PPPIOCGUNIT = 0x40047456
PPPIOCGXASYNCMAP = 0x40207450
PPPIOCSACTIVE = 0x80107446
PPPIOCSASYNCMAP = 0x80047457
PPPIOCSCOMPRESS = 0x8010744d
PPPIOCSDEBUG = 0x80047440
PPPIOCSFLAGS = 0x80047459
PPPIOCSMAXCID = 0x80047451
PPPIOCSMRRU = 0x8004743b
PPPIOCSMRU = 0x80047452
PPPIOCSNPMODE = 0x8008744b
PPPIOCSPASS = 0x80107447
PPPIOCSRASYNCMAP = 0x80047454
PPPIOCSXASYNCMAP = 0x8020744f
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4
PTRACE_GET_WATCH_REGS = 0xd0
PTRACE_OLDSETOPTIONS = 0x15
PTRACE_PEEKDATA_3264 = 0xc1
PTRACE_PEEKTEXT_3264 = 0xc0
PTRACE_POKEDATA_3264 = 0xc3
PTRACE_POKETEXT_3264 = 0xc2
PTRACE_SETFPREGS = 0xf
PTRACE_SET_THREAD_AREA = 0x1a
PTRACE_SET_WATCH_REGS = 0xd1
RLIMIT_AS = 0x6
RLIMIT_MEMLOCK = 0x9
RLIMIT_NOFILE = 0x5
RLIMIT_NPROC = 0x8
RLIMIT_RSS = 0x7
RNDADDENTROPY = 0x80085203
RNDADDTOENTCNT = 0x80045201
RNDCLEARPOOL = 0x20005206
RNDGETENTCNT = 0x40045200
RNDGETPOOL = 0x40085202
RNDRESEEDCRNG = 0x20005207
RNDZAPENTCNT = 0x20005204
RTC_AIE_OFF = 0x20007002
RTC_AIE_ON = 0x20007001
RTC_ALM_READ = 0x40247008
RTC_ALM_SET = 0x80247007
RTC_EPOCH_READ = 0x4008700d
RTC_EPOCH_SET = 0x8008700e
RTC_IRQP_READ = 0x4008700b
RTC_IRQP_SET = 0x8008700c
RTC_PARAM_GET = 0x80187013
RTC_PARAM_SET = 0x80187014
RTC_PIE_OFF = 0x20007006
RTC_PIE_ON = 0x20007005
RTC_PLL_GET = 0x40207011
RTC_PLL_SET = 0x80207012
RTC_RD_TIME = 0x40247009
RTC_SET_TIME = 0x8024700a
RTC_UIE_OFF = 0x20007004
RTC_UIE_ON = 0x20007003
RTC_VL_CLR = 0x20007014
RTC_VL_READ = 0x40047013
RTC_WIE_OFF = 0x20007010
RTC_WIE_ON = 0x2000700f
RTC_WKALM_RD = 0x40287010
RTC_WKALM_SET = 0x8028700f
SCM_TIMESTAMPING = 0x25
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SFD_CLOEXEC = 0x80000
SFD_NONBLOCK = 0x80
SIOCATMARK = 0x40047307
SIOCGPGRP = 0x40047309
SIOCGSTAMPNS_NEW = 0x40108907
SIOCGSTAMP_NEW = 0x40108906
SIOCINQ = 0x467f
SIOCOUTQ = 0x7472
SIOCSPGRP = 0x80047308
SOCK_CLOEXEC = 0x80000
SOCK_DGRAM = 0x1
SOCK_NONBLOCK = 0x80
SOCK_STREAM = 0x2
SOL_SOCKET = 0xffff
SO_ACCEPTCONN = 0x1009
SO_ATTACH_BPF = 0x32
SO_ATTACH_REUSEPORT_CBPF = 0x33
SO_ATTACH_REUSEPORT_EBPF = 0x34
SO_BINDTODEVICE = 0x19
SO_BINDTOIFINDEX = 0x3e
SO_BPF_EXTENSIONS = 0x30
SO_BROADCAST = 0x20
SO_BSDCOMPAT = 0xe
SO_BUF_LOCK = 0x48
SO_BUSY_POLL = 0x2e
SO_BUSY_POLL_BUDGET = 0x46
SO_CNX_ADVICE = 0x35
SO_COOKIE = 0x39
SO_DETACH_REUSEPORT_BPF = 0x44
SO_DOMAIN = 0x1029
SO_DONTROUTE = 0x10
SO_ERROR = 0x1007
SO_INCOMING_CPU = 0x31
SO_INCOMING_NAPI_ID = 0x38
SO_KEEPALIVE = 0x8
SO_LINGER = 0x80
SO_LOCK_FILTER = 0x2c
SO_MARK = 0x24
SO_MAX_PACING_RATE = 0x2f
SO_MEMINFO = 0x37
SO_NETNS_COOKIE = 0x47
SO_NOFCS = 0x2b
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
SO_PEERGROUPS = 0x3b
SO_PEERPIDFD = 0x4d
SO_PEERSEC = 0x1e
SO_PREFER_BUSY_POLL = 0x45
SO_PROTOCOL = 0x1028
SO_RCVBUF = 0x1002
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
SO_RESERVE_MEM = 0x49
SO_REUSEADDR = 0x4
SO_REUSEPORT = 0x200
SO_RXQ_OVFL = 0x28
SO_SECURITY_AUTHENTICATION = 0x16
SO_SECURITY_ENCRYPTION_NETWORK = 0x18
SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
SO_SELECT_ERR_QUEUE = 0x2d
SO_SNDBUF = 0x1001
SO_SNDBUFFORCE = 0x1f
SO_SNDLOWAT = 0x1003
SO_SNDTIMEO = 0x1005
SO_SNDTIMEO_NEW = 0x43
SO_SNDTIMEO_OLD = 0x1005
SO_STYLE = 0x1008
SO_TIMESTAMPING = 0x25
SO_TIMESTAMPING_NEW = 0x41
SO_TIMESTAMPING_OLD = 0x25
SO_TIMESTAMPNS = 0x23
SO_TIMESTAMPNS_NEW = 0x40
SO_TIMESTAMPNS_OLD = 0x23
SO_TIMESTAMP_NEW = 0x3f
SO_TXREHASH = 0x4a
SO_TXTIME = 0x3d
SO_TYPE = 0x1008
SO_WIFI_STATUS = 0x29
SO_ZEROCOPY = 0x3c
TAB1 = 0x800
TAB2 = 0x1000
TAB3 = 0x1800
TABDLY = 0x1800
TCFLSH = 0x5407
TCGETA = 0x5401
TCGETS = 0x540d
TCGETS2 = 0x4030542a
TCSAFLUSH = 0x5410
TCSBRK = 0x5405
TCSBRKP = 0x5486
TCSETA = 0x5402
TCSETAF = 0x5404
TCSETAW = 0x5403
TCSETS = 0x540e
TCSETS2 = 0x8030542b
TCSETSF = 0x5410
TCSETSF2 = 0x8030542d
TCSETSW = 0x540f
TCSETSW2 = 0x8030542c
TCXONC = 0x5406
TFD_CLOEXEC = 0x80000
TFD_NONBLOCK = 0x80
TIOCCBRK = 0x5428
TIOCCONS = 0x80047478
TIOCEXCL = 0x740d
TIOCGDEV = 0x40045432
TIOCGETD = 0x7400
TIOCGETP = 0x7408
TIOCGEXCL = 0x40045440
TIOCGICOUNT = 0x5492
TIOCGISO7816 = 0x40285442
TIOCGLCKTRMIOS = 0x548b
TIOCGLTC = 0x7474
TIOCGPGRP = 0x40047477
TIOCGPKT = 0x40045438
TIOCGPTLCK = 0x40045439
TIOCGPTN = 0x40045430
TIOCGPTPEER = 0x20005441
TIOCGRS485 = 0x4020542e
TIOCGSERIAL = 0x5484
TIOCGSID = 0x7416
TIOCGSOFTCAR = 0x5481
TIOCGWINSZ = 0x40087468
TIOCINQ = 0x467f
TIOCLINUX = 0x5483
TIOCMBIC = 0x741c
TIOCMBIS = 0x741b
TIOCMGET = 0x741d
TIOCMIWAIT = 0x5491
TIOCMSET = 0x741a
TIOCM_CAR = 0x100
TIOCM_CD = 0x100
TIOCM_CTS = 0x40
TIOCM_DSR = 0x400
TIOCM_RI = 0x200
TIOCM_RNG = 0x200
TIOCM_SR = 0x20
TIOCM_ST = 0x10
TIOCNOTTY = 0x5471
TIOCNXCL = 0x740e
TIOCOUTQ = 0x7472
TIOCPKT = 0x5470
TIOCSBRK = 0x5427
TIOCSCTTY = 0x5480
TIOCSERCONFIG = 0x5488
TIOCSERGETLSR = 0x548e
TIOCSERGETMULTI = 0x548f
TIOCSERGSTRUCT = 0x548d
TIOCSERGWILD = 0x5489
TIOCSERSETMULTI = 0x5490
TIOCSERSWILD = 0x548a
TIOCSER_TEMT = 0x1
TIOCSETD = 0x7401
TIOCSETN = 0x740a
TIOCSETP = 0x7409
TIOCSIG = 0x80045436
TIOCSISO7816 = 0xc0285443
TIOCSLCKTRMIOS = 0x548c
TIOCSLTC = 0x7475
TIOCSPGRP = 0x80047476
TIOCSPTLCK = 0x80045431
TIOCSRS485 = 0xc020542f
TIOCSSERIAL = 0x5485
TIOCSSOFTCAR = 0x5482
TIOCSTI = 0x5472
TIOCSWINSZ = 0x80087467
TIOCVHANGUP = 0x5437
TOSTOP = 0x8000
TUNATTACHFILTER = 0x801054d5
TUNDETACHFILTER = 0x801054d6
TUNGETDEVNETNS = 0x200054e3
TUNGETFEATURES = 0x400454cf
TUNGETFILTER = 0x401054db
TUNGETIFF = 0x400454d2
TUNGETSNDBUF = 0x400454d3
TUNGETVNETBE = 0x400454df
TUNGETVNETHDRSZ = 0x400454d7
TUNGETVNETLE = 0x400454dd
TUNSETCARRIER = 0x800454e2
TUNSETDEBUG = 0x800454c9
TUNSETFILTEREBPF = 0x400454e1
TUNSETGROUP = 0x800454ce
TUNSETIFF = 0x800454ca
TUNSETIFINDEX = 0x800454da
TUNSETLINK = 0x800454cd
TUNSETNOCSUM = 0x800454c8
TUNSETOFFLOAD = 0x800454d0
TUNSETOWNER = 0x800454cc
TUNSETPERSIST = 0x800454cb
TUNSETQUEUE = 0x800454d9
TUNSETSNDBUF = 0x800454d4
TUNSETSTEERINGEBPF = 0x400454e0
TUNSETTXFILTER = 0x800454d1
TUNSETVNETBE = 0x800454de
TUNSETVNETHDRSZ = 0x800454d8
TUNSETVNETLE = 0x800454dc
UBI_IOCATT = 0x80186f40
UBI_IOCDET = 0x80046f41
UBI_IOCEBCH = 0x80044f02
UBI_IOCEBER = 0x80044f01
UBI_IOCEBISMAP = 0x40044f05
UBI_IOCEBMAP = 0x80084f03
UBI_IOCEBUNMAP = 0x80044f04
UBI_IOCMKVOL = 0x80986f00
UBI_IOCRMVOL = 0x80046f01
UBI_IOCRNVOL = 0x91106f03
UBI_IOCRPEB = 0x80046f04
UBI_IOCRSVOL = 0x800c6f02
UBI_IOCSETVOLPROP = 0x80104f06
UBI_IOCSPEB = 0x80046f05
UBI_IOCVOLCRBLK = 0x80804f07
UBI_IOCVOLRMBLK = 0x20004f08
UBI_IOCVOLUP = 0x80084f00
VDISCARD = 0xd
VEOF = 0x10
VEOL = 0x11
VEOL2 = 0x6
VMIN = 0x4
VREPRINT = 0xc
VSTART = 0x8
VSTOP = 0x9
VSUSP = 0xa
VSWTC = 0x7
VSWTCH = 0x7
VT1 = 0x4000
VTDLY = 0x4000
VTIME = 0x5
VWERASE = 0xe
WDIOC_GETBOOTSTATUS = 0x40045702
WDIOC_GETPRETIMEOUT = 0x40045709
WDIOC_GETSTATUS = 0x40045701
WDIOC_GETSUPPORT = 0x40285700
WDIOC_GETTEMP = 0x40045703
WDIOC_GETTIMELEFT = 0x4004570a
WDIOC_GETTIMEOUT = 0x40045707
WDIOC_KEEPALIVE = 0x40045705
WDIOC_SETOPTIONS = 0x40045704
WORDSIZE = 0x40
XCASE = 0x4
XTABS = 0x1800
_HIDIOCGRAWNAME = 0x40804804
_HIDIOCGRAWPHYS = 0x40404805
_HIDIOCGRAWUNIQ = 0x40404808
)
// Errors
const (
EADDRINUSE = syscall.Errno(0x7d)
EADDRNOTAVAIL = syscall.Errno(0x7e)
EADV = syscall.Errno(0x44)
EAFNOSUPPORT = syscall.Errno(0x7c)
EALREADY = syscall.Errno(0x95)
EBADE = syscall.Errno(0x32)
EBADFD = syscall.Errno(0x51)
EBADMSG = syscall.Errno(0x4d)
EBADR = syscall.Errno(0x33)
EBADRQC = syscall.Errno(0x36)
EBADSLT = syscall.Errno(0x37)
EBFONT = syscall.Errno(0x3b)
ECANCELED = syscall.Errno(0x9e)
ECHRNG = syscall.Errno(0x25)
ECOMM = syscall.Errno(0x46)
ECONNABORTED = syscall.Errno(0x82)
ECONNREFUSED = syscall.Errno(0x92)
ECONNRESET = syscall.Errno(0x83)
EDEADLK = syscall.Errno(0x2d)
EDEADLOCK = syscall.Errno(0x38)
EDESTADDRREQ = syscall.Errno(0x60)
EDOTDOT = syscall.Errno(0x49)
EDQUOT = syscall.Errno(0x46d)
EHOSTDOWN = syscall.Errno(0x93)
EHOSTUNREACH = syscall.Errno(0x94)
EHWPOISON = syscall.Errno(0xa8)
EIDRM = syscall.Errno(0x24)
EILSEQ = syscall.Errno(0x58)
EINIT = syscall.Errno(0x8d)
EINPROGRESS = syscall.Errno(0x96)
EISCONN = syscall.Errno(0x85)
EISNAM = syscall.Errno(0x8b)
EKEYEXPIRED = syscall.Errno(0xa2)
EKEYREJECTED = syscall.Errno(0xa4)
EKEYREVOKED = syscall.Errno(0xa3)
EL2HLT = syscall.Errno(0x2c)
EL2NSYNC = syscall.Errno(0x26)
EL3HLT = syscall.Errno(0x27)
EL3RST = syscall.Errno(0x28)
ELIBACC = syscall.Errno(0x53)
ELIBBAD = syscall.Errno(0x54)
ELIBEXEC = syscall.Errno(0x57)
ELIBMAX = syscall.Errno(0x56)
ELIBSCN = syscall.Errno(0x55)
ELNRNG = syscall.Errno(0x29)
ELOOP = syscall.Errno(0x5a)
EMEDIUMTYPE = syscall.Errno(0xa0)
EMSGSIZE = syscall.Errno(0x61)
EMULTIHOP = syscall.Errno(0x4a)
ENAMETOOLONG = syscall.Errno(0x4e)
ENAVAIL = syscall.Errno(0x8a)
ENETDOWN = syscall.Errno(0x7f)
ENETRESET = syscall.Errno(0x81)
ENETUNREACH = syscall.Errno(0x80)
ENOANO = syscall.Errno(0x35)
ENOBUFS = syscall.Errno(0x84)
ENOCSI = syscall.Errno(0x2b)
ENODATA = syscall.Errno(0x3d)
ENOKEY = syscall.Errno(0xa1)
ENOLCK = syscall.Errno(0x2e)
ENOLINK = syscall.Errno(0x43)
ENOMEDIUM = syscall.Errno(0x9f)
ENOMSG = syscall.Errno(0x23)
ENONET = syscall.Errno(0x40)
ENOPKG = syscall.Errno(0x41)
ENOPROTOOPT = syscall.Errno(0x63)
ENOSR = syscall.Errno(0x3f)
ENOSTR = syscall.Errno(0x3c)
ENOSYS = syscall.Errno(0x59)
ENOTCONN = syscall.Errno(0x86)
ENOTEMPTY = syscall.Errno(0x5d)
ENOTNAM = syscall.Errno(0x89)
ENOTRECOVERABLE = syscall.Errno(0xa6)
ENOTSOCK = syscall.Errno(0x5f)
ENOTSUP = syscall.Errno(0x7a)
ENOTUNIQ = syscall.Errno(0x50)
EOPNOTSUPP = syscall.Errno(0x7a)
EOVERFLOW = syscall.Errno(0x4f)
EOWNERDEAD = syscall.Errno(0xa5)
EPFNOSUPPORT = syscall.Errno(0x7b)
EPROTO = syscall.Errno(0x47)
EPROTONOSUPPORT = syscall.Errno(0x78)
EPROTOTYPE = syscall.Errno(0x62)
EREMCHG = syscall.Errno(0x52)
EREMDEV = syscall.Errno(0x8e)
EREMOTE = syscall.Errno(0x42)
EREMOTEIO = syscall.Errno(0x8c)
ERESTART = syscall.Errno(0x5b)
ERFKILL = syscall.Errno(0xa7)
ESHUTDOWN = syscall.Errno(0x8f)
ESOCKTNOSUPPORT = syscall.Errno(0x79)
ESRMNT = syscall.Errno(0x45)
ESTALE = syscall.Errno(0x97)
ESTRPIPE = syscall.Errno(0x5c)
ETIME = syscall.Errno(0x3e)
ETIMEDOUT = syscall.Errno(0x91)
ETOOMANYREFS = syscall.Errno(0x90)
EUCLEAN = syscall.Errno(0x87)
EUNATCH = syscall.Errno(0x2a)
EUSERS = syscall.Errno(0x5e)
EXFULL = syscall.Errno(0x34)
)
// Signals
const (
SIGBUS = syscall.Signal(0xa)
SIGCHLD = syscall.Signal(0x12)
SIGCLD = syscall.Signal(0x12)
SIGCONT = syscall.Signal(0x19)
SIGEMT = syscall.Signal(0x7)
SIGIO = syscall.Signal(0x16)
SIGPOLL = syscall.Signal(0x16)
SIGPROF = syscall.Signal(0x1d)
SIGPWR = syscall.Signal(0x13)
SIGSTOP = syscall.Signal(0x17)
SIGSYS = syscall.Signal(0xc)
SIGTSTP = syscall.Signal(0x18)
SIGTTIN = syscall.Signal(0x1a)
SIGTTOU = syscall.Signal(0x1b)
SIGURG = syscall.Signal(0x15)
SIGUSR1 = syscall.Signal(0x10)
SIGUSR2 = syscall.Signal(0x11)
SIGVTALRM = syscall.Signal(0x1c)
SIGWINCH = syscall.Signal(0x14)
SIGXCPU = syscall.Signal(0x1e)
SIGXFSZ = syscall.Signal(0x1f)
)
// Error table
var errorList = [...]struct {
num syscall.Errno
name string
desc string
}{
{1, "EPERM", "operation not permitted"},
{2, "ENOENT", "no such file or directory"},
{3, "ESRCH", "no such process"},
{4, "EINTR", "interrupted system call"},
{5, "EIO", "input/output error"},
{6, "ENXIO", "no such device or address"},
{7, "E2BIG", "argument list too long"},
{8, "ENOEXEC", "exec format error"},
{9, "EBADF", "bad file descriptor"},
{10, "ECHILD", "no child processes"},
{11, "EAGAIN", "resource temporarily unavailable"},
{12, "ENOMEM", "cannot allocate memory"},
{13, "EACCES", "permission denied"},
{14, "EFAULT", "bad address"},
{15, "ENOTBLK", "block device required"},
{16, "EBUSY", "device or resource busy"},
{17, "EEXIST", "file exists"},
{18, "EXDEV", "invalid cross-device link"},
{19, "ENODEV", "no such device"},
{20, "ENOTDIR", "not a directory"},
{21, "EISDIR", "is a directory"},
{22, "EINVAL", "invalid argument"},
{23, "ENFILE", "too many open files in system"},
{24, "EMFILE", "too many open files"},
{25, "ENOTTY", "inappropriate ioctl for device"},
{26, "ETXTBSY", "text file busy"},
{27, "EFBIG", "file too large"},
{28, "ENOSPC", "no space left on device"},
{29, "ESPIPE", "illegal seek"},
{30, "EROFS", "read-only file system"},
{31, "EMLINK", "too many links"},
{32, "EPIPE", "broken pipe"},
{33, "EDOM", "numerical argument out of domain"},
{34, "ERANGE", "numerical result out of range"},
{35, "ENOMSG", "no message of desired type"},
{36, "EIDRM", "identifier removed"},
{37, "ECHRNG", "channel number out of range"},
{38, "EL2NSYNC", "level 2 not synchronized"},
{39, "EL3HLT", "level 3 halted"},
{40, "EL3RST", "level 3 reset"},
{41, "ELNRNG", "link number out of range"},
{42, "EUNATCH", "protocol driver not attached"},
{43, "ENOCSI", "no CSI structure available"},
{44, "EL2HLT", "level 2 halted"},
{45, "EDEADLK", "resource deadlock avoided"},
{46, "ENOLCK", "no locks available"},
{50, "EBADE", "invalid exchange"},
{51, "EBADR", "invalid request descriptor"},
{52, "EXFULL", "exchange full"},
{53, "ENOANO", "no anode"},
{54, "EBADRQC", "invalid request code"},
{55, "EBADSLT", "invalid slot"},
{56, "EDEADLOCK", "file locking deadlock error"},
{59, "EBFONT", "bad font file format"},
{60, "ENOSTR", "device not a stream"},
{61, "ENODATA", "no data available"},
{62, "ETIME", "timer expired"},
{63, "ENOSR", "out of streams resources"},
{64, "ENONET", "machine is not on the network"},
{65, "ENOPKG", "package not installed"},
{66, "EREMOTE", "object is remote"},
{67, "ENOLINK", "link has been severed"},
{68, "EADV", "advertise error"},
{69, "ESRMNT", "srmount error"},
{70, "ECOMM", "communication error on send"},
{71, "EPROTO", "protocol error"},
{73, "EDOTDOT", "RFS specific error"},
{74, "EMULTIHOP", "multihop attempted"},
{77, "EBADMSG", "bad message"},
{78, "ENAMETOOLONG", "file name too long"},
{79, "EOVERFLOW", "value too large for defined data type"},
{80, "ENOTUNIQ", "name not unique on network"},
{81, "EBADFD", "file descriptor in bad state"},
{82, "EREMCHG", "remote address changed"},
{83, "ELIBACC", "can not access a needed shared library"},
{84, "ELIBBAD", "accessing a corrupted shared library"},
{85, "ELIBSCN", ".lib section in a.out corrupted"},
{86, "ELIBMAX", "attempting to link in too many shared libraries"},
{87, "ELIBEXEC", "cannot exec a shared library directly"},
{88, "EILSEQ", "invalid or incomplete multibyte or wide character"},
{89, "ENOSYS", "function not implemented"},
{90, "ELOOP", "too many levels of symbolic links"},
{91, "ERESTART", "interrupted system call should be restarted"},
{92, "ESTRPIPE", "streams pipe error"},
{93, "ENOTEMPTY", "directory not empty"},
{94, "EUSERS", "too many users"},
{95, "ENOTSOCK", "socket operation on non-socket"},
{96, "EDESTADDRREQ", "destination address required"},
{97, "EMSGSIZE", "message too long"},
{98, "EPROTOTYPE", "protocol wrong type for socket"},
{99, "ENOPROTOOPT", "protocol not available"},
{120, "EPROTONOSUPPORT", "protocol not supported"},
{121, "ESOCKTNOSUPPORT", "socket type not supported"},
{122, "ENOTSUP", "operation not supported"},
{123, "EPFNOSUPPORT", "protocol family not supported"},
{124, "EAFNOSUPPORT", "address family not supported by protocol"},
{125, "EADDRINUSE", "address already in use"},
{126, "EADDRNOTAVAIL", "cannot assign requested address"},
{127, "ENETDOWN", "network is down"},
{128, "ENETUNREACH", "network is unreachable"},
{129, "ENETRESET", "network dropped connection on reset"},
{130, "ECONNABORTED", "software caused connection abort"},
{131, "ECONNRESET", "connection reset by peer"},
{132, "ENOBUFS", "no buffer space available"},
{133, "EISCONN", "transport endpoint is already connected"},
{134, "ENOTCONN", "transport endpoint is not connected"},
{135, "EUCLEAN", "structure needs cleaning"},
{137, "ENOTNAM", "not a XENIX named type file"},
{138, "ENAVAIL", "no XENIX semaphores available"},
{139, "EISNAM", "is a named type file"},
{140, "EREMOTEIO", "remote I/O error"},
{141, "EINIT", "unknown error 141"},
{142, "EREMDEV", "unknown error 142"},
{143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"},
{144, "ETOOMANYREFS", "too many references: cannot splice"},
{145, "ETIMEDOUT", "connection timed out"},
{146, "ECONNREFUSED", "connection refused"},
{147, "EHOSTDOWN", "host is down"},
{148, "EHOSTUNREACH", "no route to host"},
{149, "EALREADY", "operation already in progress"},
{150, "EINPROGRESS", "operation now in progress"},
{151, "ESTALE", "stale file handle"},
{158, "ECANCELED", "operation canceled"},
{159, "ENOMEDIUM", "no medium found"},
{160, "EMEDIUMTYPE", "wrong medium type"},
{161, "ENOKEY", "required key not available"},
{162, "EKEYEXPIRED", "key has expired"},
{163, "EKEYREVOKED", "key has been revoked"},
{164, "EKEYREJECTED", "key was rejected by service"},
{165, "EOWNERDEAD", "owner died"},
{166, "ENOTRECOVERABLE", "state not recoverable"},
{167, "ERFKILL", "operation not possible due to RF-kill"},
{168, "EHWPOISON", "memory page has hardware error"},
{1133, "EDQUOT", "disk quota exceeded"},
}
// Signal table
var signalList = [...]struct {
num syscall.Signal
name string
desc string
}{
{1, "SIGHUP", "hangup"},
{2, "SIGINT", "interrupt"},
{3, "SIGQUIT", "quit"},
{4, "SIGILL", "illegal instruction"},
{5, "SIGTRAP", "trace/breakpoint trap"},
{6, "SIGABRT", "aborted"},
{7, "SIGEMT", "EMT trap"},
{8, "SIGFPE", "floating point exception"},
{9, "SIGKILL", "killed"},
{10, "SIGBUS", "bus error"},
{11, "SIGSEGV", "segmentation fault"},
{12, "SIGSYS", "bad system call"},
{13, "SIGPIPE", "broken pipe"},
{14, "SIGALRM", "alarm clock"},
{15, "SIGTERM", "terminated"},
{16, "SIGUSR1", "user defined signal 1"},
{17, "SIGUSR2", "user defined signal 2"},
{18, "SIGCHLD", "child exited"},
{19, "SIGPWR", "power failure"},
{20, "SIGWINCH", "window changed"},
{21, "SIGURG", "urgent I/O condition"},
{22, "SIGIO", "I/O possible"},
{23, "SIGSTOP", "stopped (signal)"},
{24, "SIGTSTP", "stopped"},
{25, "SIGCONT", "continued"},
{26, "SIGTTIN", "stopped (tty input)"},
{27, "SIGTTOU", "stopped (tty output)"},
{28, "SIGVTALRM", "virtual timer expired"},
{29, "SIGPROF", "profiling timer expired"},
{30, "SIGXCPU", "CPU time limit exceeded"},
{31, "SIGXFSZ", "file size limit exceeded"},
}
| vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.0010400756727904081,
0.00019527555559761822,
0.00016221632540691644,
0.00016767463239375502,
0.00011913683556485921
] |
{
"id": 0,
"code_window": [
"\n",
"func (self *TagsController) checkout(tag *models.Tag) error {\n",
"\tself.c.LogAction(self.c.Tr.Actions.CheckoutTag)\n",
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.Name, types.CheckoutRefOptions{}); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\treturn self.c.PushContext(self.c.Contexts().Branches)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.FullRefName(), types.CheckoutRefOptions{}); err != nil {\n"
],
"file_path": "pkg/gui/controllers/tags_controller.go",
"type": "replace",
"edit_start_line_idx": 85
} | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc
#include "textflag.h"
//
// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
//
TEXT ·syscall6(SB),NOSPLIT,$0-88
JMP syscall·syscall6(SB)
TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
JMP syscall·rawSyscall6(SB)
| vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.00017649740038905293,
0.0001755668199621141,
0.00017463622498326004,
0.0001755668199621141,
9.30587702896446e-7
] |
{
"id": 0,
"code_window": [
"\n",
"func (self *TagsController) checkout(tag *models.Tag) error {\n",
"\tself.c.LogAction(self.c.Tr.Actions.CheckoutTag)\n",
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.Name, types.CheckoutRefOptions{}); err != nil {\n",
"\t\treturn err\n",
"\t}\n",
"\treturn self.c.PushContext(self.c.Contexts().Branches)\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\tif err := self.c.Helpers().Refs.CheckoutRef(tag.FullRefName(), types.CheckoutRefOptions{}); err != nil {\n"
],
"file_path": "pkg/gui/controllers/tags_controller.go",
"type": "replace",
"edit_start_line_idx": 85
} | package git_commands
import (
"path/filepath"
"strings"
"testing"
"github.com/fsmiamoto/git-todo-parser/todo"
"github.com/go-errors/errors"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/commands/oscommands"
"github.com/jesseduffield/lazygit/pkg/commands/types/enums"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/stretchr/testify/assert"
)
var commitsOutput = strings.Replace(`0eea75e8c631fba6b58135697835d58ba4c18dbc|1640826609|Jesse Duffield|[email protected]|HEAD -> better-tests|b21997d6b4cbdf84b149|better typing for rebase mode
b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164|1640824515|Jesse Duffield|[email protected]|origin/better-tests|e94e8fc5b6fab4cb755f|fix logging
e94e8fc5b6fab4cb755f29f1bdb3ee5e001df35c|1640823749|Jesse Duffield|[email protected]|tag: 123, tag: 456|d8084cd558925eb7c9c3|refactor
d8084cd558925eb7c9c38afeed5725c21653ab90|1640821426|Jesse Duffield|[email protected]||65f910ebd85283b5cce9|WIP
65f910ebd85283b5cce9bf67d03d3f1a9ea3813a|1640821275|Jesse Duffield|[email protected]||26c07b1ab33860a1a759|WIP
26c07b1ab33860a1a7591a0638f9925ccf497ffa|1640750752|Jesse Duffield|[email protected]||3d4470a6c072208722e5|WIP
3d4470a6c072208722e5ae9a54bcb9634959a1c5|1640748818|Jesse Duffield|[email protected]||053a66a7be3da43aacdc|WIP
053a66a7be3da43aacdc7aa78e1fe757b82c4dd2|1640739815|Jesse Duffield|[email protected]||985fe482e806b172aea4|refactoring the config struct`, "|", "\x00", -1)
var singleCommitOutput = strings.Replace(`0eea75e8c631fba6b58135697835d58ba4c18dbc|1640826609|Jesse Duffield|[email protected]|HEAD -> better-tests|b21997d6b4cbdf84b149|better typing for rebase mode`, "|", "\x00", -1)
func TestGetCommits(t *testing.T) {
type scenario struct {
testName string
runner *oscommands.FakeCmdObjRunner
expectedCommits []*models.Commit
expectedError error
logOrder string
rebaseMode enums.RebaseMode
opts GetCommitsOptions
mainBranches []string
}
scenarios := []scenario{
{
testName: "should return no commits if there are none",
logOrder: "topo-order",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "HEAD", RefForPushedStatus: "mybranch", IncludeRebaseCommits: false},
runner: oscommands.NewFakeRunner(t).
ExpectGitArgs([]string{"merge-base", "mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
ExpectGitArgs([]string{"log", "HEAD", "--topo-order", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--no-show-signature", "--"}, "", nil),
expectedCommits: []*models.Commit{},
expectedError: nil,
},
{
testName: "should use proper upstream name for branch",
logOrder: "topo-order",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "refs/heads/mybranch", RefForPushedStatus: "refs/heads/mybranch", IncludeRebaseCommits: false},
runner: oscommands.NewFakeRunner(t).
ExpectGitArgs([]string{"merge-base", "refs/heads/mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
ExpectGitArgs([]string{"log", "refs/heads/mybranch", "--topo-order", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--no-show-signature", "--"}, "", nil),
expectedCommits: []*models.Commit{},
expectedError: nil,
},
{
testName: "should return commits if they are present",
logOrder: "topo-order",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "HEAD", RefForPushedStatus: "mybranch", IncludeRebaseCommits: false},
mainBranches: []string{"master", "main", "develop"},
runner: oscommands.NewFakeRunner(t).
// here it's seeing which commits are yet to be pushed
ExpectGitArgs([]string{"merge-base", "mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
// here it's actually getting all the commits in a formatted form, one per line
ExpectGitArgs([]string{"log", "HEAD", "--topo-order", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--no-show-signature", "--"}, commitsOutput, nil).
// here it's testing which of the configured main branches have an upstream
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "master@{u}"}, "refs/remotes/origin/master", nil). // this one does
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "main@{u}"}, "", errors.New("error")). // this one doesn't, so it checks origin instead
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/remotes/origin/main"}, "", nil). // yep, origin/main exists
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "develop@{u}"}, "", errors.New("error")). // this one doesn't, so it checks origin instead
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/remotes/origin/develop"}, "", errors.New("error")). // doesn't exist there, either, so it checks for a local branch
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/heads/develop"}, "", errors.New("error")). // no local branch either
// here it's seeing where our branch diverged from the master branch so that we can mark that commit and parent commits as 'merged'
ExpectGitArgs([]string{"merge-base", "HEAD", "refs/remotes/origin/master", "refs/remotes/origin/main"}, "26c07b1ab33860a1a7591a0638f9925ccf497ffa", nil),
expectedCommits: []*models.Commit{
{
Sha: "0eea75e8c631fba6b58135697835d58ba4c18dbc",
Name: "better typing for rebase mode",
Status: models.StatusUnpushed,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "(HEAD -> better-tests)",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640826609,
Parents: []string{
"b21997d6b4cbdf84b149",
},
},
{
Sha: "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164",
Name: "fix logging",
Status: models.StatusPushed,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "(origin/better-tests)",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640824515,
Parents: []string{
"e94e8fc5b6fab4cb755f",
},
},
{
Sha: "e94e8fc5b6fab4cb755f29f1bdb3ee5e001df35c",
Name: "refactor",
Status: models.StatusPushed,
Action: models.ActionNone,
Tags: []string{"123", "456"},
ExtraInfo: "(tag: 123, tag: 456)",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640823749,
Parents: []string{
"d8084cd558925eb7c9c3",
},
},
{
Sha: "d8084cd558925eb7c9c38afeed5725c21653ab90",
Name: "WIP",
Status: models.StatusPushed,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640821426,
Parents: []string{
"65f910ebd85283b5cce9",
},
},
{
Sha: "65f910ebd85283b5cce9bf67d03d3f1a9ea3813a",
Name: "WIP",
Status: models.StatusPushed,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640821275,
Parents: []string{
"26c07b1ab33860a1a759",
},
},
{
Sha: "26c07b1ab33860a1a7591a0638f9925ccf497ffa",
Name: "WIP",
Status: models.StatusMerged,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640750752,
Parents: []string{
"3d4470a6c072208722e5",
},
},
{
Sha: "3d4470a6c072208722e5ae9a54bcb9634959a1c5",
Name: "WIP",
Status: models.StatusMerged,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640748818,
Parents: []string{
"053a66a7be3da43aacdc",
},
},
{
Sha: "053a66a7be3da43aacdc7aa78e1fe757b82c4dd2",
Name: "refactoring the config struct",
Status: models.StatusMerged,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640739815,
Parents: []string{
"985fe482e806b172aea4",
},
},
},
expectedError: nil,
},
{
testName: "should not call merge-base for mainBranches if none exist",
logOrder: "topo-order",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "HEAD", RefForPushedStatus: "mybranch", IncludeRebaseCommits: false},
mainBranches: []string{"master", "main"},
runner: oscommands.NewFakeRunner(t).
// here it's seeing which commits are yet to be pushed
ExpectGitArgs([]string{"merge-base", "mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
// here it's actually getting all the commits in a formatted form, one per line
ExpectGitArgs([]string{"log", "HEAD", "--topo-order", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--no-show-signature", "--"}, singleCommitOutput, nil).
// here it's testing which of the configured main branches exist; neither does
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "master@{u}"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/remotes/origin/master"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/heads/master"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "main@{u}"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/remotes/origin/main"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/heads/main"}, "", errors.New("error")),
expectedCommits: []*models.Commit{
{
Sha: "0eea75e8c631fba6b58135697835d58ba4c18dbc",
Name: "better typing for rebase mode",
Status: models.StatusUnpushed,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "(HEAD -> better-tests)",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640826609,
Parents: []string{
"b21997d6b4cbdf84b149",
},
},
},
expectedError: nil,
},
{
testName: "should call merge-base for all main branches that exist",
logOrder: "topo-order",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "HEAD", RefForPushedStatus: "mybranch", IncludeRebaseCommits: false},
mainBranches: []string{"master", "main", "develop", "1.0-hotfixes"},
runner: oscommands.NewFakeRunner(t).
// here it's seeing which commits are yet to be pushed
ExpectGitArgs([]string{"merge-base", "mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
// here it's actually getting all the commits in a formatted form, one per line
ExpectGitArgs([]string{"log", "HEAD", "--topo-order", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--no-show-signature", "--"}, singleCommitOutput, nil).
// here it's testing which of the configured main branches exist
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "master@{u}"}, "refs/remotes/origin/master", nil).
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "main@{u}"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/remotes/origin/main"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--verify", "--quiet", "refs/heads/main"}, "", errors.New("error")).
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "develop@{u}"}, "refs/remotes/origin/develop", nil).
ExpectGitArgs([]string{"rev-parse", "--symbolic-full-name", "1.0-hotfixes@{u}"}, "refs/remotes/origin/1.0-hotfixes", nil).
// here it's seeing where our branch diverged from the master branch so that we can mark that commit and parent commits as 'merged'
ExpectGitArgs([]string{"merge-base", "HEAD", "refs/remotes/origin/master", "refs/remotes/origin/develop", "refs/remotes/origin/1.0-hotfixes"}, "26c07b1ab33860a1a7591a0638f9925ccf497ffa", nil),
expectedCommits: []*models.Commit{
{
Sha: "0eea75e8c631fba6b58135697835d58ba4c18dbc",
Name: "better typing for rebase mode",
Status: models.StatusUnpushed,
Action: models.ActionNone,
Tags: []string{},
ExtraInfo: "(HEAD -> better-tests)",
AuthorName: "Jesse Duffield",
AuthorEmail: "[email protected]",
UnixTimestamp: 1640826609,
Parents: []string{
"b21997d6b4cbdf84b149",
},
},
},
expectedError: nil,
},
{
testName: "should not specify order if `log.order` is `default`",
logOrder: "default",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "HEAD", RefForPushedStatus: "mybranch", IncludeRebaseCommits: false},
runner: oscommands.NewFakeRunner(t).
ExpectGitArgs([]string{"merge-base", "mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
ExpectGitArgs([]string{"log", "HEAD", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--no-show-signature", "--"}, "", nil),
expectedCommits: []*models.Commit{},
expectedError: nil,
},
{
testName: "should set filter path",
logOrder: "default",
rebaseMode: enums.REBASE_MODE_NONE,
opts: GetCommitsOptions{RefName: "HEAD", RefForPushedStatus: "mybranch", FilterPath: "src"},
runner: oscommands.NewFakeRunner(t).
ExpectGitArgs([]string{"merge-base", "mybranch", "mybranch@{u}"}, "b21997d6b4cbdf84b149d8e6a2c4d06a8e9ec164", nil).
ExpectGitArgs([]string{"log", "HEAD", "--oneline", "--pretty=format:%H%x00%at%x00%aN%x00%ae%x00%D%x00%p%x00%s%x00%m", "--abbrev=40", "--follow", "--no-show-signature", "--", "src"}, "", nil),
expectedCommits: []*models.Commit{},
expectedError: nil,
},
}
for _, scenario := range scenarios {
scenario := scenario
t.Run(scenario.testName, func(t *testing.T) {
common := utils.NewDummyCommon()
common.UserConfig.Git.Log.Order = scenario.logOrder
builder := &CommitLoader{
Common: common,
cmd: oscommands.NewDummyCmdObjBuilder(scenario.runner),
getRebaseMode: func() (enums.RebaseMode, error) { return scenario.rebaseMode, nil },
dotGitDir: ".git",
readFile: func(filename string) ([]byte, error) {
return []byte(""), nil
},
walkFiles: func(root string, fn filepath.WalkFunc) error {
return nil
},
}
common.UserConfig.Git.MainBranches = scenario.mainBranches
commits, err := builder.GetCommits(scenario.opts)
assert.Equal(t, scenario.expectedCommits, commits)
assert.Equal(t, scenario.expectedError, err)
scenario.runner.CheckForMissingCalls()
})
}
}
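// getConflictedCommitImpl walks the interactive-rebase todo and "done" lists
// to decide which commit, if any, the rebase is currently stopped on with a
// conflict; each scenario below encodes one shape those lists can take.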
func TestCommitLoader_getConflictedCommitImpl(t *testing.T) {
scenarios := []struct {
testName string
todos []todo.Todo
doneTodos []todo.Todo
amendFileExists bool
expectedSha string
}{
{
testName: "no done todos",
todos: []todo.Todo{},
doneTodos: []todo.Todo{},
amendFileExists: false,
expectedSha: "",
},
{
testName: "common case (conflict)",
todos: []todo.Todo{},
doneTodos: []todo.Todo{
{
Command: todo.Pick,
Commit: "deadbeef",
},
{
Command: todo.Pick,
Commit: "fa1afe1",
},
},
amendFileExists: false,
expectedSha: "fa1afe1",
},
{
testName: "last command was 'break'",
todos: []todo.Todo{},
doneTodos: []todo.Todo{
{Command: todo.Break},
},
amendFileExists: false,
expectedSha: "",
},
{
testName: "last command was 'exec'",
todos: []todo.Todo{},
doneTodos: []todo.Todo{
{
Command: todo.Exec,
ExecCommand: "make test",
},
},
amendFileExists: false,
expectedSha: "",
},
{
testName: "last command was 'reword'",
todos: []todo.Todo{},
doneTodos: []todo.Todo{
{Command: todo.Reword},
},
amendFileExists: false,
expectedSha: "",
},
{
testName: "'pick' was rescheduled",
todos: []todo.Todo{
{
Command: todo.Pick,
Commit: "fa1afe1",
},
},
doneTodos: []todo.Todo{
{
Command: todo.Pick,
Commit: "fa1afe1",
},
},
amendFileExists: false,
expectedSha: "",
},
{
testName: "'pick' was rescheduled, buggy git version",
todos: []todo.Todo{
{
Command: todo.Pick,
Commit: "fa1afe1",
},
},
doneTodos: []todo.Todo{
{
Command: todo.Pick,
Commit: "deadbeaf",
},
{
Command: todo.Pick,
Commit: "fa1afe1",
},
{
Command: todo.Pick,
Commit: "deadbeaf",
},
},
amendFileExists: false,
expectedSha: "",
},
{
testName: "conflicting 'pick' after 'exec'",
todos: []todo.Todo{
{
Command: todo.Exec,
ExecCommand: "make test",
},
},
doneTodos: []todo.Todo{
{
Command: todo.Pick,
Commit: "deadbeaf",
},
{
Command: todo.Exec,
ExecCommand: "make test",
},
{
Command: todo.Pick,
Commit: "fa1afe1",
},
},
amendFileExists: false,
expectedSha: "fa1afe1",
},
{
testName: "'edit' with amend file",
todos: []todo.Todo{},
doneTodos: []todo.Todo{
{
Command: todo.Edit,
Commit: "fa1afe1",
},
},
amendFileExists: true,
expectedSha: "",
},
{
testName: "'edit' without amend file",
todos: []todo.Todo{},
doneTodos: []todo.Todo{
{
Command: todo.Edit,
Commit: "fa1afe1",
},
},
amendFileExists: false,
expectedSha: "fa1afe1",
},
}
for _, scenario := range scenarios {
t.Run(scenario.testName, func(t *testing.T) {
common := utils.NewDummyCommon()
builder := &CommitLoader{
Common: common,
cmd: oscommands.NewDummyCmdObjBuilder(oscommands.NewFakeRunner(t)),
getRebaseMode: func() (enums.RebaseMode, error) { return enums.REBASE_MODE_INTERACTIVE, nil },
dotGitDir: ".git",
readFile: func(filename string) ([]byte, error) {
return []byte(""), nil
},
walkFiles: func(root string, fn filepath.WalkFunc) error {
return nil
},
}
sha := builder.getConflictedCommitImpl(scenario.todos, scenario.doneTodos, scenario.amendFileExists)
assert.Equal(t, scenario.expectedSha, sha)
})
}
}
func TestCommitLoader_setCommitMergedStatuses(t *testing.T) {
type scenario struct {
testName string
commits []*models.Commit
ancestor string
expectedCommits []*models.Commit
}
scenarios := []scenario{
{
testName: "basic",
commits: []*models.Commit{
{Sha: "12345", Name: "1", Action: models.ActionNone, Status: models.StatusUnpushed},
{Sha: "67890", Name: "2", Action: models.ActionNone, Status: models.StatusPushed},
{Sha: "abcde", Name: "3", Action: models.ActionNone, Status: models.StatusPushed},
},
ancestor: "67890",
expectedCommits: []*models.Commit{
{Sha: "12345", Name: "1", Action: models.ActionNone, Status: models.StatusUnpushed},
{Sha: "67890", Name: "2", Action: models.ActionNone, Status: models.StatusMerged},
{Sha: "abcde", Name: "3", Action: models.ActionNone, Status: models.StatusMerged},
},
},
{
testName: "with update-ref",
commits: []*models.Commit{
{Sha: "12345", Name: "1", Action: models.ActionNone, Status: models.StatusUnpushed},
{Sha: "", Name: "", Action: todo.UpdateRef, Status: models.StatusNone},
{Sha: "abcde", Name: "3", Action: models.ActionNone, Status: models.StatusPushed},
},
ancestor: "deadbeef",
expectedCommits: []*models.Commit{
{Sha: "12345", Name: "1", Action: models.ActionNone, Status: models.StatusUnpushed},
{Sha: "", Name: "", Action: todo.UpdateRef, Status: models.StatusNone},
{Sha: "abcde", Name: "3", Action: models.ActionNone, Status: models.StatusPushed},
},
},
}
for _, scenario := range scenarios {
t.Run(scenario.testName, func(t *testing.T) {
expectedCommits := scenario.commits
setCommitMergedStatuses(scenario.ancestor, expectedCommits)
assert.Equal(t, scenario.expectedCommits, expectedCommits)
})
}
}
| pkg/commands/git_commands/commit_loader_test.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.00020914216293022037,
0.00017216638661921024,
0.00015986160724423826,
0.0001713266537990421,
0.000006731433586537605
] |
{
"id": 1,
"code_window": [
"\t\t\t).\n",
"\t\t\tPressPrimaryAction() // checkout tag\n",
"\n",
"\t\tt.Views().Branches().IsFocused().Lines(\n",
"\t\t\t/* EXPECTED:\n",
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 28
} | package tag
import (
"github.com/jesseduffield/lazygit/pkg/config"
. "github.com/jesseduffield/lazygit/pkg/integration/components"
)
var CheckoutWhenBranchWithSameNameExists = NewIntegrationTest(NewIntegrationTestArgs{
Description: "Checkout a tag when there's a branch with the same name",
ExtraCmdArgs: []string{},
Skip: false,
SetupConfig: func(config *config.AppConfig) {},
SetupRepo: func(shell *Shell) {
shell.EmptyCommit("one")
shell.NewBranch("tag")
shell.Checkout("master")
shell.EmptyCommit("two")
shell.CreateLightweightTag("tag", "HEAD")
},
Run: func(t *TestDriver, keys config.KeybindingConfig) {
t.Views().Tags().
Focus().
Lines(
Contains("tag").IsSelected(),
).
PressPrimaryAction() // checkout tag
t.Views().Branches().IsFocused().Lines(
/* EXPECTED:
Contains("HEAD detached at tag").IsSelected(),
Contains("master"),
Contains("tag"),
ACTUAL: */
Contains("* tag").DoesNotContain("HEAD detached").IsSelected(),
Contains("master"),
)
},
})
| pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go | 1 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.129071906208992,
0.03925660625100136,
0.000515377672854811,
0.013719575479626656,
0.05292358994483948
] |
{
"id": 1,
"code_window": [
"\t\t\t).\n",
"\t\t\tPressPrimaryAction() // checkout tag\n",
"\n",
"\t\tt.Views().Branches().IsFocused().Lines(\n",
"\t\t\t/* EXPECTED:\n",
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 28
} | # This config is used in our integration tests. If you want to modify it for a specific test, you can do so in the SetupConfig function
disableStartupPopups: true
promptToReturnFromSubprocess: false
gui:
theme:
activeBorderColor:
- green
- bold
inactiveBorderColor:
- black
SelectedRangeBgcolor:
- reverse
# Not important in tests but it creates clutter in demos
showRandomTip: false
animateExplosion: false # takes too long
git:
# We don't want to run any periodic background git commands because it'll introduce race conditions and flakiness.
# If we need to refresh something from within the test (which should only really happen if we've invoked a
# shell command in the background) we should have the user press shift+R to refresh.
# TODO: add tests which explicitly test auto-refresh functionality
autoRefresh: false
autoFetch: false
| test/default_test_config/config.yml | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.0001711731601972133,
0.0001704844180494547,
0.00016988583956845105,
0.00017039428348653018,
5.293978233567032e-7
] |
{
"id": 1,
"code_window": [
"\t\t\t).\n",
"\t\t\tPressPrimaryAction() // checkout tag\n",
"\n",
"\t\tt.Views().Branches().IsFocused().Lines(\n",
"\t\t\t/* EXPECTED:\n",
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 28
} | package controllers
import (
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/gui/types"
)
type ListControllerFactory struct {
c *ControllerCommon
}
func NewListControllerFactory(c *ControllerCommon) *ListControllerFactory {
return &ListControllerFactory{
c: c,
}
}
func (self *ListControllerFactory) Create(context types.IListContext) *ListController {
return &ListController{
baseController: baseController{},
c: self.c,
context: context,
}
}
type ListController struct {
baseController
c *ControllerCommon
context types.IListContext
}
func (self *ListController) Context() types.Context {
return self.context
}
func (self *ListController) HandlePrevLine() error {
return self.handleLineChange(-1)
}
func (self *ListController) HandleNextLine() error {
return self.handleLineChange(1)
}
func (self *ListController) HandleScrollLeft() error {
return self.scrollHorizontal(self.context.GetViewTrait().ScrollLeft)
}
func (self *ListController) HandleScrollRight() error {
return self.scrollHorizontal(self.context.GetViewTrait().ScrollRight)
}
func (self *ListController) HandleScrollUp() error {
scrollHeight := self.c.UserConfig.Gui.ScrollHeight
self.context.GetViewTrait().ScrollUp(scrollHeight)
return nil
}
func (self *ListController) HandleScrollDown() error {
scrollHeight := self.c.UserConfig.Gui.ScrollHeight
self.context.GetViewTrait().ScrollDown(scrollHeight)
return nil
}
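// scrollHorizontal runs the given scroll function, then re-runs the focus
// handler so that dependent views are re-rendered.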
func (self *ListController) scrollHorizontal(scrollFunc func()) error {
scrollFunc()
return self.context.HandleFocus(types.OnFocusOpts{})
}
func (self *ListController) handleLineChange(change int) error {
before := self.context.GetList().GetSelectedLineIdx()
self.context.GetList().MoveSelectedLine(change)
after := self.context.GetList().GetSelectedLineIdx()
if err := self.pushContextIfNotFocused(); err != nil {
return err
}
// doing this check so that if we're holding the up key at the start of the list
// we're not constantly re-rendering the main view.
if before != after {
if change == -1 {
checkScrollUp(self.context.GetViewTrait(), self.c.UserConfig,
self.context.ModelIndexToViewIndex(before), self.context.ModelIndexToViewIndex(after))
} else if change == 1 {
checkScrollDown(self.context.GetViewTrait(), self.c.UserConfig,
self.context.ModelIndexToViewIndex(before), self.context.ModelIndexToViewIndex(after))
}
return self.context.HandleFocus(types.OnFocusOpts{})
}
return nil
}
func (self *ListController) HandlePrevPage() error {
return self.handleLineChange(-self.context.GetViewTrait().PageDelta())
}
func (self *ListController) HandleNextPage() error {
return self.handleLineChange(self.context.GetViewTrait().PageDelta())
}
func (self *ListController) HandleGotoTop() error {
return self.handleLineChange(-self.context.GetList().Len())
}
func (self *ListController) HandleGotoBottom() error {
return self.handleLineChange(self.context.GetList().Len())
}
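// HandleClick selects the clicked line, focusing the list first if necessary;
// clicking an already-selected line in an already-focused list triggers the
// context's onClick handler instead.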
func (self *ListController) HandleClick(opts gocui.ViewMouseBindingOpts) error {
prevSelectedLineIdx := self.context.GetList().GetSelectedLineIdx()
newSelectedLineIdx := self.context.ViewIndexToModelIndex(opts.Y)
alreadyFocused := self.isFocused()
if err := self.pushContextIfNotFocused(); err != nil {
return err
}
if newSelectedLineIdx > self.context.GetList().Len()-1 {
return nil
}
self.context.GetList().SetSelectedLineIdx(newSelectedLineIdx)
if prevSelectedLineIdx == newSelectedLineIdx && alreadyFocused && self.context.GetOnClick() != nil {
return self.context.GetOnClick()()
}
return self.context.HandleFocus(types.OnFocusOpts{})
}
func (self *ListController) pushContextIfNotFocused() error {
if !self.isFocused() {
if err := self.c.PushContext(self.context); err != nil {
return err
}
}
return nil
}
func (self *ListController) isFocused() bool {
return self.c.CurrentContext().GetKey() == self.context.GetKey()
}
func (self *ListController) GetKeybindings(opts types.KeybindingsOpts) []*types.Binding {
return []*types.Binding{
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.PrevItemAlt), Handler: self.HandlePrevLine},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.PrevItem), Handler: self.HandlePrevLine},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.NextItemAlt), Handler: self.HandleNextLine},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.NextItem), Handler: self.HandleNextLine},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.PrevPage), Handler: self.HandlePrevPage, Description: self.c.Tr.PrevPage},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.NextPage), Handler: self.HandleNextPage, Description: self.c.Tr.NextPage},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.GotoTop), Handler: self.HandleGotoTop, Description: self.c.Tr.GotoTop},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.ScrollLeft), Handler: self.HandleScrollLeft},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.ScrollRight), Handler: self.HandleScrollRight},
{Tag: "navigation", Key: opts.GetKey(opts.Config.Universal.GotoBottom), Handler: self.HandleGotoBottom, Description: self.c.Tr.GotoBottom},
}
}
func (self *ListController) GetMouseKeybindings(opts types.KeybindingsOpts) []*gocui.ViewMouseBinding {
return []*gocui.ViewMouseBinding{
{
ViewName: self.context.GetViewName(),
Key: gocui.MouseWheelUp,
Handler: func(gocui.ViewMouseBindingOpts) error { return self.HandleScrollUp() },
},
{
ViewName: self.context.GetViewName(),
Key: gocui.MouseLeft,
Handler: func(opts gocui.ViewMouseBindingOpts) error { return self.HandleClick(opts) },
},
{
ViewName: self.context.GetViewName(),
Key: gocui.MouseWheelDown,
Handler: func(gocui.ViewMouseBindingOpts) error { return self.HandleScrollDown() },
},
}
}
| pkg/gui/controllers/list_controller.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.0009298508521169424,
0.0002098371769534424,
0.0001650215417612344,
0.000170844592503272,
0.0001697312982287258
] |
{
"id": 1,
"code_window": [
"\t\t\t).\n",
"\t\t\tPressPrimaryAction() // checkout tag\n",
"\n",
"\t\tt.Views().Branches().IsFocused().Lines(\n",
"\t\t\t/* EXPECTED:\n",
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 28
} | package components
import (
"fmt"
"io"
"math/rand"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"github.com/jesseduffield/lazygit/pkg/utils"
)
// this is for running shell commands, mostly for the sake of setting up the repo
// but you can also run the commands from within lazygit to emulate things happening
// in the background.
type Shell struct {
// working directory the shell is invoked in
dir string
// when running the shell outside the gui we can directly panic on failure,
// but inside the gui we need to close the gui before panicking
fail func(string)
randomFileContentIndex int
}
func NewShell(dir string, fail func(string)) *Shell {
return &Shell{dir: dir, fail: fail}
}
func (self *Shell) RunCommand(args []string) *Shell {
return self.RunCommandWithEnv(args, []string{})
}
func (self *Shell) RunCommandWithEnv(args []string, env []string) *Shell {
output, err := self.runCommandWithOutputAndEnv(args, env)
if err != nil {
self.fail(fmt.Sprintf("error running command: %v\n%s", args, output))
}
return self
}
func (self *Shell) RunCommandExpectError(args []string) *Shell {
output, err := self.runCommandWithOutput(args)
if err == nil {
self.fail(fmt.Sprintf("Expected error running shell command: %v\n%s", args, output))
}
return self
}
func (self *Shell) runCommandWithOutput(args []string) (string, error) {
return self.runCommandWithOutputAndEnv(args, []string{})
}
func (self *Shell) runCommandWithOutputAndEnv(args []string, env []string) (string, error) {
cmd := exec.Command(args[0], args[1:]...)
cmd.Env = append(os.Environ(), env...)
cmd.Dir = self.dir
output, err := cmd.CombinedOutput()
return string(output), err
}
func (self *Shell) RunShellCommand(cmdStr string) *Shell {
shell := "sh"
shellArg := "-c"
if runtime.GOOS == "windows" {
shell = "cmd"
shellArg = "/C"
}
cmd := exec.Command(shell, shellArg, cmdStr)
cmd.Env = os.Environ()
cmd.Dir = self.dir
output, err := cmd.CombinedOutput()
if err != nil {
self.fail(fmt.Sprintf("error running shell command: %s\n%s", cmdStr, string(output)))
}
return self
}
func (self *Shell) CreateFile(path string, content string) *Shell {
fullPath := filepath.Join(self.dir, path)
// create any required directories
dir := filepath.Dir(fullPath)
if err := os.MkdirAll(dir, 0o755); err != nil {
self.fail(fmt.Sprintf("error creating directory: %s\n%s", dir, err))
}
err := os.WriteFile(fullPath, []byte(content), 0o644)
if err != nil {
self.fail(fmt.Sprintf("error creating file: %s\n%s", fullPath, err))
}
return self
}
func (self *Shell) DeleteFile(path string) *Shell {
fullPath := filepath.Join(self.dir, path)
err := os.RemoveAll(fullPath)
if err != nil {
self.fail(fmt.Sprintf("error deleting file: %s\n%s", fullPath, err))
}
return self
}
func (self *Shell) CreateDir(path string) *Shell {
fullPath := filepath.Join(self.dir, path)
if err := os.MkdirAll(fullPath, 0o755); err != nil {
self.fail(fmt.Sprintf("error creating directory: %s\n%s", fullPath, err))
}
return self
}
func (self *Shell) UpdateFile(path string, content string) *Shell {
fullPath := filepath.Join(self.dir, path)
err := os.WriteFile(fullPath, []byte(content), 0o644)
if err != nil {
self.fail(fmt.Sprintf("error updating file: %s\n%s", fullPath, err))
}
return self
}
func (self *Shell) NewBranch(name string) *Shell {
return self.RunCommand([]string{"git", "checkout", "-b", name})
}
func (self *Shell) NewBranchFrom(name string, from string) *Shell {
return self.RunCommand([]string{"git", "checkout", "-b", name, from})
}
func (self *Shell) Checkout(name string) *Shell {
return self.RunCommand([]string{"git", "checkout", name})
}
func (self *Shell) Merge(name string) *Shell {
return self.RunCommand([]string{"git", "merge", "--commit", "--no-ff", name})
}
func (self *Shell) ContinueMerge() *Shell {
return self.RunCommand([]string{"git", "-c", "core.editor=true", "merge", "--continue"})
}
func (self *Shell) GitAdd(path string) *Shell {
return self.RunCommand([]string{"git", "add", path})
}
func (self *Shell) GitAddAll() *Shell {
return self.RunCommand([]string{"git", "add", "-A"})
}
func (self *Shell) Commit(message string) *Shell {
return self.RunCommand([]string{"git", "commit", "-m", message})
}
func (self *Shell) EmptyCommit(message string) *Shell {
return self.RunCommand([]string{"git", "commit", "--allow-empty", "-m", message})
}
func (self *Shell) EmptyCommitDaysAgo(message string, daysAgo int) *Shell {
return self.RunCommand([]string{"git", "commit", "--allow-empty", "--date", fmt.Sprintf("%d days ago", daysAgo), "-m", message})
}
func (self *Shell) EmptyCommitWithDate(message string, date string) *Shell {
env := []string{
"GIT_AUTHOR_DATE=" + date,
"GIT_COMMITTER_DATE=" + date,
}
return self.RunCommandWithEnv([]string{"git", "commit", "--allow-empty", "-m", message}, env)
}
func (self *Shell) Revert(ref string) *Shell {
return self.RunCommand([]string{"git", "revert", ref})
}
func (self *Shell) CreateLightweightTag(name string, ref string) *Shell {
return self.RunCommand([]string{"git", "tag", name, ref})
}
func (self *Shell) CreateAnnotatedTag(name string, message string, ref string) *Shell {
return self.RunCommand([]string{"git", "tag", "-a", name, "-m", message, ref})
}
func (self *Shell) PushBranch(upstream, branch string) *Shell {
return self.RunCommand([]string{"git", "push", "--set-upstream", upstream, branch})
}
// convenience method for creating a file and adding it
func (self *Shell) CreateFileAndAdd(fileName string, fileContents string) *Shell {
return self.
CreateFile(fileName, fileContents).
GitAdd(fileName)
}
// convenience method for updating a file and adding it
func (self *Shell) UpdateFileAndAdd(fileName string, fileContents string) *Shell {
return self.
UpdateFile(fileName, fileContents).
GitAdd(fileName)
}
// convenience method for deleting a file and adding it
func (self *Shell) DeleteFileAndAdd(fileName string) *Shell {
return self.
DeleteFile(fileName).
GitAdd(fileName)
}
// creates commits 01, 02, 03, ..., n with a new file in each
// The reason for padding with zeroes is so that it's easier to do string
// matches on the commit messages when there are many of them
func (self *Shell) CreateNCommits(n int) *Shell {
return self.CreateNCommitsStartingAt(n, 1)
}
func (self *Shell) CreateNCommitsStartingAt(n, startIndex int) *Shell {
for i := startIndex; i < startIndex+n; i++ {
self.CreateFileAndAdd(
fmt.Sprintf("file%02d.txt", i),
fmt.Sprintf("file%02d content", i),
).
Commit(fmt.Sprintf("commit %02d", i))
}
return self
}
// Only to be used in demos, because the list might change and we don't want
// tests to break when it does.
func (self *Shell) CreateNCommitsWithRandomMessages(n int) *Shell {
for i := 0; i < n; i++ {
file := RandomFiles[i]
self.CreateFileAndAdd(
file.Name,
file.Content,
).
Commit(RandomCommitMessages[i])
}
return self
}
// CreateRepoHistory creates a repo history of commits.
// It uses a branching strategy where each feature branch is branched directly
// off of the master branch.
// Only to be used in demos.
func (self *Shell) CreateRepoHistory() *Shell {
authors := []string{"Yang Wen-li", "Siegfried Kircheis", "Paul Oberstein", "Oscar Reuenthal", "Fredrica Greenhill"}
numAuthors := 5
numBranches := 10
numInitialCommits := 20
maxCommitsPerBranch := 5
// Each commit will happen on a separate day
repoStartDaysAgo := 100
totalCommits := 0
// Generate commits
for i := 0; i < numInitialCommits; i++ {
author := authors[i%numAuthors]
commitMessage := RandomCommitMessages[totalCommits%len(RandomCommitMessages)]
self.SetAuthor(author, "")
self.EmptyCommitDaysAgo(commitMessage, repoStartDaysAgo-totalCommits)
totalCommits++
}
// Generate branches and merges
for i := 0; i < numBranches; i++ {
// We'll have one author creating all the commits in the branch
author := authors[i%numAuthors]
branchName := RandomBranchNames[i%len(RandomBranchNames)]
		// Choose a random recent commit on the master branch (an offset of at most 5)
lastMasterCommit := totalCommits - 1
commitOffset := rand.Intn(utils.Min(lastMasterCommit, 5)) + 1
// Create the feature branch and checkout the chosen commit
self.NewBranchFrom(branchName, fmt.Sprintf("master~%d", commitOffset))
numCommitsInBranch := rand.Intn(maxCommitsPerBranch) + 1
for j := 0; j < numCommitsInBranch; j++ {
commitMessage := RandomCommitMessages[totalCommits%len(RandomCommitMessages)]
self.SetAuthor(author, "")
self.EmptyCommitDaysAgo(commitMessage, repoStartDaysAgo-totalCommits)
totalCommits++
}
self.Checkout("master")
prevCommitterDate := os.Getenv("GIT_COMMITTER_DATE")
prevAuthorDate := os.Getenv("GIT_AUTHOR_DATE")
commitDate := time.Now().Add(time.Duration(totalCommits-repoStartDaysAgo) * time.Hour * 24)
os.Setenv("GIT_COMMITTER_DATE", commitDate.Format(time.RFC3339))
os.Setenv("GIT_AUTHOR_DATE", commitDate.Format(time.RFC3339))
// Merge branch into master
self.RunCommand([]string{"git", "merge", "--no-ff", branchName, "-m", fmt.Sprintf("Merge %s into master", branchName)})
os.Setenv("GIT_COMMITTER_DATE", prevCommitterDate)
os.Setenv("GIT_AUTHOR_DATE", prevAuthorDate)
}
return self
}
// Creates a commit with a random file
// Only to be used in demos
func (self *Shell) RandomChangeCommit(message string) *Shell {
index := self.randomFileContentIndex
self.randomFileContentIndex++
randomFileName := fmt.Sprintf("random-%d.go", index)
self.CreateFileAndAdd(randomFileName, RandomFileContents[index%len(RandomFileContents)])
return self.Commit(message)
}
func (self *Shell) SetConfig(key string, value string) *Shell {
self.RunCommand([]string{"git", "config", "--local", key, value})
return self
}
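// CloneIntoRemote creates a bare clone of the current repo in a sibling
// directory, registers it as a remote under the given name, and fetches it.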
func (self *Shell) CloneIntoRemote(name string) *Shell {
self.Clone(name)
self.RunCommand([]string{"git", "remote", "add", name, "../" + name})
self.RunCommand([]string{"git", "fetch", name})
return self
}
func (self *Shell) CloneIntoSubmodule(submoduleName string) *Shell {
self.Clone("other_repo")
self.RunCommand([]string{"git", "submodule", "add", "../other_repo", submoduleName})
return self
}
func (self *Shell) Clone(repoName string) *Shell {
self.RunCommand([]string{"git", "clone", "--bare", ".", "../" + repoName})
return self
}
func (self *Shell) SetBranchUpstream(branch string, upstream string) *Shell {
self.RunCommand([]string{"git", "branch", "--set-upstream-to=" + upstream, branch})
return self
}
func (self *Shell) RemoveRemoteBranch(remoteName string, branch string) *Shell {
self.RunCommand([]string{"git", "-C", "../" + remoteName, "branch", "-d", branch})
return self
}
func (self *Shell) HardReset(ref string) *Shell {
self.RunCommand([]string{"git", "reset", "--hard", ref})
return self
}
func (self *Shell) Stash(message string) *Shell {
self.RunCommand([]string{"git", "stash", "push", "-m", message})
return self
}
func (self *Shell) StartBisect(good string, bad string) *Shell {
self.RunCommand([]string{"git", "bisect", "start", good, bad})
return self
}
func (self *Shell) Init() *Shell {
self.RunCommand([]string{"git", "-c", "init.defaultBranch=master", "init"})
return self
}
func (self *Shell) AddWorktree(base string, path string, newBranchName string) *Shell {
return self.RunCommand([]string{
"git", "worktree", "add", "-b",
newBranchName, path, base,
})
}
// add worktree and have it checkout the base branch
func (self *Shell) AddWorktreeCheckout(base string, path string) *Shell {
return self.RunCommand([]string{
"git", "worktree", "add", path, base,
})
}
func (self *Shell) AddFileInWorktree(worktreePath string) *Shell {
self.CreateFile(filepath.Join(worktreePath, "content"), "content")
self.RunCommand([]string{
"git", "-C", worktreePath, "add", "content",
})
return self
}
func (self *Shell) MakeExecutable(path string) *Shell {
	// 0755 gives the owner read/write/execute permissions, and read/execute to group and others
err := os.Chmod(filepath.Join(self.dir, path), 0o755)
if err != nil {
panic(err)
}
return self
}
// Help files are located at test/files from the root of the lazygit repo.
// E.g. You may want to create a pre-commit hook file there, then call this
// function to copy it into your test repo.
func (self *Shell) CopyHelpFile(source string, destination string) *Shell {
return self.CopyFile(fmt.Sprintf("../../../../../files/%s", source), destination)
}
func (self *Shell) CopyFile(source string, destination string) *Shell {
absSourcePath := filepath.Join(self.dir, source)
absDestPath := filepath.Join(self.dir, destination)
sourceFile, err := os.Open(absSourcePath)
if err != nil {
self.fail(err.Error())
}
defer sourceFile.Close()
destinationFile, err := os.Create(absDestPath)
if err != nil {
self.fail(err.Error())
}
defer destinationFile.Close()
_, err = io.Copy(destinationFile, sourceFile)
if err != nil {
self.fail(err.Error())
}
// copy permissions to destination file too
sourceFileInfo, err := os.Stat(absSourcePath)
if err != nil {
self.fail(err.Error())
}
err = os.Chmod(absDestPath, sourceFileInfo.Mode())
if err != nil {
self.fail(err.Error())
}
return self
}
// NOTE: this only takes effect before running the test;
// the test will still run in the original directory
func (self *Shell) Chdir(path string) *Shell {
self.dir = filepath.Join(self.dir, path)
return self
}
func (self *Shell) SetAuthor(authorName string, authorEmail string) *Shell {
self.RunCommand([]string{"git", "config", "--local", "user.name", authorName})
self.RunCommand([]string{"git", "config", "--local", "user.email", authorEmail})
return self
}
| pkg/integration/components/shell.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.00042869162280112505,
0.00018324929988011718,
0.00016352301463484764,
0.00017158719128929079,
0.00004500652721617371
] |
{
"id": 2,
"code_window": [
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n",
"\t\t\tACTUAL: */\n",
"\t\t\tContains(\"* tag\").DoesNotContain(\"HEAD detached\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t)\n",
"\t},\n",
"})"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 32
} | package controllers
import (
"github.com/jesseduffield/gocui"
"github.com/jesseduffield/lazygit/pkg/commands/models"
"github.com/jesseduffield/lazygit/pkg/gui/context"
"github.com/jesseduffield/lazygit/pkg/gui/types"
"github.com/jesseduffield/lazygit/pkg/utils"
)
type TagsController struct {
baseController
c *ControllerCommon
}
var _ types.IController = &TagsController{}
func NewTagsController(
common *ControllerCommon,
) *TagsController {
return &TagsController{
baseController: baseController{},
c: common,
}
}
func (self *TagsController) GetKeybindings(opts types.KeybindingsOpts) []*types.Binding {
bindings := []*types.Binding{
{
Key: opts.GetKey(opts.Config.Universal.Select),
Handler: self.withSelectedTag(self.checkout),
Description: self.c.Tr.Checkout,
},
{
Key: opts.GetKey(opts.Config.Universal.Remove),
Handler: self.withSelectedTag(self.delete),
Description: self.c.Tr.ViewDeleteOptions,
OpensMenu: true,
},
{
Key: opts.GetKey(opts.Config.Branches.PushTag),
Handler: self.withSelectedTag(self.push),
Description: self.c.Tr.PushTag,
},
{
Key: opts.GetKey(opts.Config.Universal.New),
Handler: self.create,
Description: self.c.Tr.CreateTag,
},
{
Key: opts.GetKey(opts.Config.Commits.ViewResetOptions),
Handler: self.withSelectedTag(self.createResetMenu),
Description: self.c.Tr.ViewResetOptions,
OpensMenu: true,
},
}
return bindings
}
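// GetOnRenderToMain renders the selected tag's commit graph into the main view,
// or a "No tags" placeholder when nothing is selected.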
func (self *TagsController) GetOnRenderToMain() func() error {
return func() error {
return self.c.Helpers().Diff.WithDiffModeCheck(func() error {
var task types.UpdateTask
tag := self.context().GetSelected()
if tag == nil {
task = types.NewRenderStringTask("No tags")
} else {
cmdObj := self.c.Git().Branch.GetGraphCmdObj(tag.FullRefName())
task = types.NewRunCommandTask(cmdObj.GetCmd())
}
return self.c.RenderToMainViews(types.RefreshMainOpts{
Pair: self.c.MainViewPairs().Normal,
Main: &types.ViewUpdateOpts{
Title: "Tag",
Task: task,
},
})
})
}
}
func (self *TagsController) checkout(tag *models.Tag) error {
self.c.LogAction(self.c.Tr.Actions.CheckoutTag)
if err := self.c.Helpers().Refs.CheckoutRef(tag.Name, types.CheckoutRefOptions{}); err != nil {
return err
}
return self.c.PushContext(self.c.Contexts().Branches)
}
func (self *TagsController) localDelete(tag *models.Tag) error {
return self.c.WithWaitingStatus(self.c.Tr.DeletingStatus, func(gocui.Task) error {
self.c.LogAction(self.c.Tr.Actions.DeleteLocalTag)
err := self.c.Git().Tag.LocalDelete(tag.Name)
_ = self.c.Refresh(types.RefreshOptions{Mode: types.ASYNC, Scope: []types.RefreshableView{types.COMMITS, types.TAGS}})
return err
})
}
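// remoteDelete prompts for the remote to delete from (suggesting "origin"),
// asks for confirmation, then deletes the tag from that remote while showing
// an inline status on the tag row.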
func (self *TagsController) remoteDelete(tag *models.Tag) error {
title := utils.ResolvePlaceholderString(
self.c.Tr.SelectRemoteTagUpstream,
map[string]string{
"tagName": tag.Name,
},
)
return self.c.Prompt(types.PromptOpts{
Title: title,
InitialContent: "origin",
FindSuggestionsFunc: self.c.Helpers().Suggestions.GetRemoteSuggestionsFunc(),
HandleConfirm: func(upstream string) error {
confirmTitle := utils.ResolvePlaceholderString(
self.c.Tr.DeleteTagTitle,
map[string]string{
"tagName": tag.Name,
},
)
confirmPrompt := utils.ResolvePlaceholderString(
self.c.Tr.DeleteRemoteTagPrompt,
map[string]string{
"tagName": tag.Name,
"upstream": upstream,
},
)
return self.c.Confirm(types.ConfirmOpts{
Title: confirmTitle,
Prompt: confirmPrompt,
HandleConfirm: func() error {
return self.c.WithInlineStatus(tag, types.ItemOperationDeleting, context.TAGS_CONTEXT_KEY, func(task gocui.Task) error {
self.c.LogAction(self.c.Tr.Actions.DeleteRemoteTag)
if err := self.c.Git().Remote.DeleteRemoteTag(task, upstream, tag.Name); err != nil {
return err
}
self.c.Toast(self.c.Tr.RemoteTagDeletedMessage)
return self.c.Refresh(types.RefreshOptions{Mode: types.ASYNC, Scope: []types.RefreshableView{types.COMMITS, types.TAGS}})
})
},
})
},
})
}
func (self *TagsController) delete(tag *models.Tag) error {
menuTitle := utils.ResolvePlaceholderString(
self.c.Tr.DeleteTagTitle,
map[string]string{
"tagName": tag.Name,
},
)
menuItems := []*types.MenuItem{
{
Label: self.c.Tr.DeleteLocalTag,
Key: 'c',
OnPress: func() error {
return self.localDelete(tag)
},
},
{
Label: self.c.Tr.DeleteRemoteTag,
Key: 'r',
OpensMenu: true,
OnPress: func() error {
return self.remoteDelete(tag)
},
},
}
return self.c.Menu(types.CreateMenuOptions{
Title: menuTitle,
Items: menuItems,
})
}
func (self *TagsController) push(tag *models.Tag) error {
title := utils.ResolvePlaceholderString(
self.c.Tr.PushTagTitle,
map[string]string{
"tagName": tag.Name,
},
)
return self.c.Prompt(types.PromptOpts{
Title: title,
InitialContent: "origin",
FindSuggestionsFunc: self.c.Helpers().Suggestions.GetRemoteSuggestionsFunc(),
HandleConfirm: func(response string) error {
return self.c.WithInlineStatus(tag, types.ItemOperationPushing, context.TAGS_CONTEXT_KEY, func(task gocui.Task) error {
self.c.LogAction(self.c.Tr.Actions.PushTag)
err := self.c.Git().Tag.Push(task, response, tag.Name)
// Render again to remove the inline status:
self.c.OnUIThread(func() error {
_ = self.c.Contexts().Tags.HandleRender()
return nil
})
return err
})
},
})
}
func (self *TagsController) createResetMenu(tag *models.Tag) error {
return self.c.Helpers().Refs.CreateGitResetMenu(tag.Name)
}
func (self *TagsController) create() error {
// leaving commit SHA blank so that we're just creating the tag for the current commit
return self.c.Helpers().Tags.OpenCreateTagPrompt("", func() { self.context().SetSelectedLineIdx(0) })
}
func (self *TagsController) withSelectedTag(f func(tag *models.Tag) error) func() error {
return func() error {
tag := self.context().GetSelected()
if tag == nil {
return nil
}
return f(tag)
}
}
func (self *TagsController) Context() types.Context {
return self.context()
}
func (self *TagsController) context() *context.TagsContext {
return self.c.Contexts().Tags
}
| pkg/gui/controllers/tags_controller.go | 1 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.0009992927080020308,
0.0002170021616620943,
0.0001617396919755265,
0.00016927275282796472,
0.00016664981376379728
] |
{
"id": 2,
"code_window": [
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n",
"\t\t\tACTUAL: */\n",
"\t\t\tContains(\"* tag\").DoesNotContain(\"HEAD detached\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t)\n",
"\t},\n",
"})"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 32
} | // Copyright 2015 Garrett D'Amore
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use file except in compliance with the License.
// You may obtain a copy of the license at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package encoding
import (
"golang.org/x/text/encoding"
)
type validUtf8 struct{}
// UTF8 is an encoding for UTF-8. All it does is verify that the incoming
// UTF-8 is valid. The main reason for its existence is that it will detect
// and report ErrSrcShort or ErrDstShort, whereas the Nop encoding just
// passes every byte, blithely.
var UTF8 encoding.Encoding = validUtf8{}
func (validUtf8) NewDecoder() *encoding.Decoder {
return &encoding.Decoder{Transformer: encoding.UTF8Validator}
}
func (validUtf8) NewEncoder() *encoding.Encoder {
return &encoding.Encoder{Transformer: encoding.UTF8Validator}
}
| vendor/github.com/gdamore/encoding/utf8.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.00017748466052580625,
0.00017037635552696884,
0.00016218151722569019,
0.00017091962217818946,
0.000006379105798259843
] |
{
"id": 2,
"code_window": [
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n",
"\t\t\tACTUAL: */\n",
"\t\t\tContains(\"* tag\").DoesNotContain(\"HEAD detached\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t)\n",
"\t},\n",
"})"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 32
} | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (386 || amd64 || amd64p32) && gc
package cpu
// cpuid is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
// and in cpu_gccgo.c for gccgo.
func xgetbv() (eax, edx uint32)
| vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.00022522598737850785,
0.0001989976444747299,
0.00017276930157095194,
0.0001989976444747299,
0.000026228342903777957
] |
{
"id": 2,
"code_window": [
"\t\t\tContains(\"HEAD detached at tag\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t\tContains(\"tag\"),\n",
"\t\t\tACTUAL: */\n",
"\t\t\tContains(\"* tag\").DoesNotContain(\"HEAD detached\").IsSelected(),\n",
"\t\t\tContains(\"master\"),\n",
"\t\t)\n",
"\t},\n",
"})"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/integration/tests/tag/checkout_when_branch_with_same_name_exists.go",
"type": "replace",
"edit_start_line_idx": 32
} | package ui
import (
"github.com/jesseduffield/lazygit/pkg/config"
. "github.com/jesseduffield/lazygit/pkg/integration/components"
)
var OpenLinkFailure = NewIntegrationTest(NewIntegrationTestArgs{
Description: "When opening links via the OS fails, show a dialog instead.",
ExtraCmdArgs: []string{},
Skip: false,
SetupConfig: func(config *config.AppConfig) {
config.UserConfig.OS.OpenLink = "exit 42"
},
SetupRepo: func(shell *Shell) {},
Run: func(t *TestDriver, keys config.KeybindingConfig) {
t.Views().Information().Click(0, 0)
t.ExpectPopup().Confirmation().
Title(Equals("Donate")).
Content(Equals("Please go to https://github.com/sponsors/jesseduffield")).
Confirm()
},
})
| pkg/integration/tests/ui/open_link_failure.go | 0 | https://github.com/jesseduffield/lazygit/commit/f244ec8251d77e41d02507811f49c388aa67a042 | [
0.00017484916315879673,
0.00017268907686229795,
0.00017116058734245598,
0.0001720574509818107,
0.000001570690415064746
] |
{
"id": 0,
"code_window": [
" },\n",
"\n",
" hasMatchingGlobPath(pathName, capability) {\n",
" const globPaths = this.get('globPaths');\n",
" if (globPaths) {\n",
" const matchingPath = Object.keys(globPaths).find(k => pathName.includes(k));\n",
" const hasMatchingPath =\n",
" (matchingPath && !this.isDenied(globPaths[matchingPath])) || globPaths.hasOwnProperty('');\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" const matchingPath = Object.keys(globPaths).find(k => {\n",
" return pathName.includes(k) || pathName.includes(k.replace(/\\/$/, ''));\n",
" });\n"
],
"file_path": "ui/app/services/permissions.js",
"type": "replace",
"edit_start_line_idx": 140
} | import { module, test } from 'qunit';
import { setupTest } from 'ember-qunit';
import Pretender from 'pretender';
import Service from '@ember/service';
const PERMISSIONS_RESPONSE = {
data: {
exact_paths: {
foo: {
capabilities: ['read'],
},
'bar/bee': {
capabilities: ['create', 'list'],
},
boo: {
capabilities: ['deny'],
},
},
glob_paths: {
'baz/biz': {
capabilities: ['read'],
},
},
},
};
module('Unit | Service | permissions', function(hooks) {
setupTest(hooks);
hooks.beforeEach(function() {
this.server = new Pretender();
this.server.get('/v1/sys/internal/ui/resultant-acl', () => {
return [200, { 'Content-Type': 'application/json' }, JSON.stringify(PERMISSIONS_RESPONSE)];
});
});
hooks.afterEach(function() {
this.server.shutdown();
});
test('sets paths properly', async function(assert) {
let service = this.owner.lookup('service:permissions');
await service.getPaths.perform();
assert.deepEqual(service.get('exactPaths'), PERMISSIONS_RESPONSE.data.exact_paths);
assert.deepEqual(service.get('globPaths'), PERMISSIONS_RESPONSE.data.glob_paths);
});
test('returns true if a policy includes access to an exact path', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('exactPaths', PERMISSIONS_RESPONSE.data.exact_paths);
assert.equal(service.hasPermission('foo'), true);
});
test('returns true if a paths prefix is included in the policys exact paths', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('exactPaths', PERMISSIONS_RESPONSE.data.exact_paths);
assert.equal(service.hasPermission('bar'), true);
});
test('it returns true if a policy includes access to a glob path', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);
assert.equal(service.hasPermission('baz/biz/hi'), true);
});
test('it returns true if a policy includes access to the * glob path', function(assert) {
let service = this.owner.lookup('service:permissions');
const splatPath = { '': {} };
service.set('globPaths', splatPath);
assert.equal(service.hasPermission('hi'), true);
});
test('it returns false if the matched path includes the deny capability', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);
assert.equal(service.hasPermission('boo'), false);
});
test('it returns false if a policy does not includes access to a path', function(assert) {
let service = this.owner.lookup('service:permissions');
assert.equal(service.hasPermission('danger'), false);
});
test('sets the root token', function(assert) {
let service = this.owner.lookup('service:permissions');
service.setPaths({ data: { root: true } });
assert.equal(service.canViewAll, true);
});
test('returns true with the root token', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('canViewAll', true);
assert.equal(service.hasPermission('hi'), true);
});
test('it returns true if a policy has the specified capabilities on a path', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('exactPaths', PERMISSIONS_RESPONSE.data.exact_paths);
service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);
assert.equal(service.hasPermission('bar/bee', ['create', 'list']), true);
assert.equal(service.hasPermission('baz/biz', ['read']), true);
});
test('it returns false if a policy does not have the specified capabilities on a path', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('exactPaths', PERMISSIONS_RESPONSE.data.exact_paths);
service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);
assert.equal(service.hasPermission('bar/bee', ['create', 'delete']), false);
assert.equal(service.hasPermission('foo', ['create']), false);
});
test('defaults to show all items when policy cannot be found', async function(assert) {
let service = this.owner.lookup('service:permissions');
this.server.get('/v1/sys/internal/ui/resultant-acl', () => {
return [403, { 'Content-Type': 'application/json' }];
});
await service.getPaths.perform();
assert.equal(service.canViewAll, true);
});
test('returns the first allowed nav route for policies', function(assert) {
let service = this.owner.lookup('service:permissions');
const policyPaths = {
'sys/policies/acl': {
capabilities: ['deny'],
},
'sys/policies/rgp': {
capabilities: ['read'],
},
};
service.set('exactPaths', policyPaths);
assert.equal(service.navPathParams('policies'), 'rgp');
});
test('returns the first allowed nav route for access', function(assert) {
let service = this.owner.lookup('service:permissions');
const accessPaths = {
'sys/auth': {
capabilities: ['deny'],
},
'identity/entities': {
capabilities: ['read'],
},
};
const expected = ['vault.cluster.access.identity', 'entities'];
service.set('exactPaths', accessPaths);
assert.deepEqual(service.navPathParams('access'), expected);
});
test('hasNavPermission returns true if a policy includes access to at least one path', function(assert) {
let service = this.owner.lookup('service:permissions');
const accessPaths = {
'sys/auth': {
capabilities: ['deny'],
},
'sys/leases/lookup': {
capabilities: ['read'],
},
};
service.set('exactPaths', accessPaths);
assert.equal(service.hasNavPermission('access', 'leases'), true);
});
test('hasNavPermission returns false if a policy does not include access to any paths', function(assert) {
let service = this.owner.lookup('service:permissions');
service.set('exactPaths', {});
assert.equal(service.hasNavPermission('access'), false);
});
test('appends the namespace to the path if there is one', function(assert) {
const namespaceService = Service.extend({
path: 'marketing',
});
this.owner.register('service:namespace', namespaceService);
let service = this.owner.lookup('service:permissions');
assert.equal(service.pathNameWithNamespace('sys/auth'), 'marketing/sys/auth');
});
});
| ui/tests/unit/services/permissions-test.js | 1 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.10915493965148926,
0.008661204017698765,
0.0001665996969677508,
0.0003041164600290358,
0.02569398656487465
] |
{
"id": 0,
"code_window": [
" },\n",
"\n",
" hasMatchingGlobPath(pathName, capability) {\n",
" const globPaths = this.get('globPaths');\n",
" if (globPaths) {\n",
" const matchingPath = Object.keys(globPaths).find(k => pathName.includes(k));\n",
" const hasMatchingPath =\n",
" (matchingPath && !this.isDenied(globPaths[matchingPath])) || globPaths.hasOwnProperty('');\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" const matchingPath = Object.keys(globPaths).find(k => {\n",
" return pathName.includes(k) || pathName.includes(k.replace(/\\/$/, ''));\n",
" });\n"
],
"file_path": "ui/app/services/permissions.js",
"type": "replace",
"edit_start_line_idx": 140
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errors provides detailed error types for api field validation.
package errors // import "k8s.io/apimachinery/pkg/api/errors"
| vendor/k8s.io/apimachinery/pkg/api/errors/doc.go | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.00017525644216220826,
0.0001738417922751978,
0.00017242714238818735,
0.0001738417922751978,
0.0000014146498870104551
] |
{
"id": 0,
"code_window": [
" },\n",
"\n",
" hasMatchingGlobPath(pathName, capability) {\n",
" const globPaths = this.get('globPaths');\n",
" if (globPaths) {\n",
" const matchingPath = Object.keys(globPaths).find(k => pathName.includes(k));\n",
" const hasMatchingPath =\n",
" (matchingPath && !this.isDenied(globPaths[matchingPath])) || globPaths.hasOwnProperty('');\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" const matchingPath = Object.keys(globPaths).find(k => {\n",
" return pathName.includes(k) || pathName.includes(k.replace(/\\/$/, ''));\n",
" });\n"
],
"file_path": "ui/app/services/permissions.js",
"type": "replace",
"edit_start_line_idx": 140
} | Copyright (c) 2012, Neal van Veen ([email protected])
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
| vendor/github.com/Nvveen/Gotty/LICENSE | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.00016898837930057198,
0.00016737186524551362,
0.00016505930398125201,
0.00016806791245471686,
0.0000016778496956249
] |
{
"id": 0,
"code_window": [
" },\n",
"\n",
" hasMatchingGlobPath(pathName, capability) {\n",
" const globPaths = this.get('globPaths');\n",
" if (globPaths) {\n",
" const matchingPath = Object.keys(globPaths).find(k => pathName.includes(k));\n",
" const hasMatchingPath =\n",
" (matchingPath && !this.isDenied(globPaths[matchingPath])) || globPaths.hasOwnProperty('');\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
" const matchingPath = Object.keys(globPaths).find(k => {\n",
" return pathName.includes(k) || pathName.includes(k.replace(/\\/$/, ''));\n",
" });\n"
],
"file_path": "ui/app/services/permissions.js",
"type": "replace",
"edit_start_line_idx": 140
} | package command
import (
"strings"
"testing"
"github.com/mitchellh/cli"
)
func testSecretsListCommand(tb testing.TB) (*cli.MockUi, *SecretsListCommand) {
tb.Helper()
ui := cli.NewMockUi()
return ui, &SecretsListCommand{
BaseCommand: &BaseCommand{
UI: ui,
},
}
}
func TestSecretsListCommand_Run(t *testing.T) {
t.Parallel()
cases := []struct {
name string
args []string
out string
code int
}{
{
"too_many_args",
[]string{"foo"},
"Too many arguments",
1,
},
{
"lists",
nil,
"Path",
0,
},
{
"detailed",
[]string{"-detailed"},
"Default TTL",
0,
},
}
t.Run("validations", func(t *testing.T) {
t.Parallel()
for _, tc := range cases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
client, closer := testVaultServer(t)
defer closer()
ui, cmd := testSecretsListCommand(t)
cmd.client = client
code := cmd.Run(tc.args)
if code != tc.code {
t.Errorf("expected %d to be %d", code, tc.code)
}
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
if !strings.Contains(combined, tc.out) {
t.Errorf("expected %q to contain %q", combined, tc.out)
}
})
}
})
t.Run("communication_failure", func(t *testing.T) {
t.Parallel()
client, closer := testVaultServerBad(t)
defer closer()
ui, cmd := testSecretsListCommand(t)
cmd.client = client
code := cmd.Run([]string{})
if exp := 2; code != exp {
t.Errorf("expected %d to be %d", code, exp)
}
expected := "Error listing secrets engines: "
combined := ui.OutputWriter.String() + ui.ErrorWriter.String()
if !strings.Contains(combined, expected) {
t.Errorf("expected %q to contain %q", combined, expected)
}
})
t.Run("no_tabs", func(t *testing.T) {
t.Parallel()
_, cmd := testSecretsListCommand(t)
assertNoTabs(t, cmd)
})
}
| command/secrets_list_test.go | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.00017166530597023666,
0.00016854074783623219,
0.00016418764425907284,
0.00016973882156889886,
0.0000023346831312665017
] |
{
"id": 1,
"code_window": [
" 'baz/biz': {\n",
" capabilities: ['read'],\n",
" },\n",
" },\n",
" },\n",
"};\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" 'ends/in/slash/': {\n",
" capabilities: ['list'],\n",
" },\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 22
} | import Service, { inject as service } from '@ember/service';
import { task } from 'ember-concurrency';
const API_PATHS = {
secrets: { engine: 'cubbyhole/' },
access: {
methods: 'sys/auth',
entities: 'identity/entities',
groups: 'identity/groups',
leases: 'sys/leases/lookup',
namespaces: 'sys/namespaces',
'control-groups': 'sys/control-group/',
},
policies: {
acl: 'sys/policies/acl',
rgp: 'sys/policies/rgp',
egp: 'sys/policies/egp',
},
tools: {
wrap: 'sys/wrapping/wrap',
lookup: 'sys/wrapping/lookup',
unwrap: 'sys/wrapping/unwrap',
rewrap: 'sys/wrapping/rewrap',
random: 'sys/tools/random',
hash: 'sys/tools/hash',
},
status: {
replication: 'sys/replication',
license: 'sys/license',
seal: 'sys/seal',
},
};
const API_PATHS_TO_ROUTE_PARAMS = {
'sys/auth': ['vault.cluster.access.methods'],
'identity/entities': ['vault.cluster.access.identity', 'entities'],
'identity/groups': ['vault.cluster.access.identity', 'groups'],
'sys/leases/lookup': ['vault.cluster.access.leases'],
'sys/namespaces': ['vault.cluster.access.namespaces'],
'sys/control-group/': ['vault.cluster.access.control-groups'],
};
/*
The Permissions service is used to gate top navigation and sidebar items. It fetches
  a user's policy from the resultant-acl endpoint and stores their allowed exact and glob
paths as state. It also has methods for checking whether a user has permission for a given
path.
*/
export default Service.extend({
exactPaths: null,
globPaths: null,
canViewAll: null,
store: service(),
auth: service(),
namespace: service(),
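  // Fetches the user's resultant-acl and caches the allowed paths; if the
  // request fails, we default to showing all nav items (see the catch below).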
getPaths: task(function*() {
if (this.paths) {
return;
}
try {
let resp = yield this.get('store')
.adapterFor('permissions')
.query();
this.setPaths(resp);
return;
} catch (err) {
// If no policy can be found, default to showing all nav items.
this.set('canViewAll', true);
}
}),
setPaths(resp) {
this.set('exactPaths', resp.data.exact_paths);
this.set('globPaths', resp.data.glob_paths);
this.set('canViewAll', resp.data.root);
},
reset() {
this.set('exactPaths', null);
this.set('globPaths', null);
this.set('canViewAll', null);
},
hasNavPermission(navItem, routeParams) {
if (routeParams) {
return this.hasPermission(API_PATHS[navItem][routeParams]);
}
return Object.values(API_PATHS[navItem]).some(path => this.hasPermission(path));
},
navPathParams(navItem) {
const path = Object.values(API_PATHS[navItem]).find(path => this.hasPermission(path));
if (['policies', 'tools'].includes(navItem)) {
return path.split('/').lastObject;
}
return API_PATHS_TO_ROUTE_PARAMS[path];
},
pathNameWithNamespace(pathName) {
const namespace = this.get('namespace').path;
if (namespace) {
return `${namespace}/${pathName}`;
} else {
return pathName;
}
},
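  // Returns true when every requested capability is granted for the
  // (namespace-prefixed) path by an exact or glob rule; root tokens always pass.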
hasPermission(pathName, capabilities = [null]) {
const path = this.pathNameWithNamespace(pathName);
if (this.canViewAll) {
return true;
}
return capabilities.every(
capability => this.hasMatchingExactPath(path, capability) || this.hasMatchingGlobPath(path, capability)
);
},
hasMatchingExactPath(pathName, capability) {
const exactPaths = this.get('exactPaths');
if (exactPaths) {
const prefix = Object.keys(exactPaths).find(path => path.startsWith(pathName));
const hasMatchingPath = prefix && !this.isDenied(exactPaths[prefix]);
if (prefix && capability) {
return this.hasCapability(exactPaths[prefix], capability) && hasMatchingPath;
}
return hasMatchingPath;
}
return false;
},
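  // Glob rules match when the path contains the rule's key; the empty-string
  // glob ('') matches every path.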
hasMatchingGlobPath(pathName, capability) {
const globPaths = this.get('globPaths');
if (globPaths) {
const matchingPath = Object.keys(globPaths).find(k => pathName.includes(k));
const hasMatchingPath =
(matchingPath && !this.isDenied(globPaths[matchingPath])) || globPaths.hasOwnProperty('');
if (matchingPath && capability) {
return this.hasCapability(globPaths[matchingPath], capability) && hasMatchingPath;
}
return hasMatchingPath;
}
return false;
},
hasCapability(path, capability) {
return path.capabilities.includes(capability);
},
isDenied(path) {
return path.capabilities.includes('deny');
},
});
| ui/app/services/permissions.js | 1 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.007671655621379614,
0.0006400339771062136,
0.00016336279804818332,
0.0001704295282252133,
0.0017589560011401772
] |
{
"id": 1,
"code_window": [
" 'baz/biz': {\n",
" capabilities: ['read'],\n",
" },\n",
" },\n",
" },\n",
"};\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" 'ends/in/slash/': {\n",
" capabilities: ['list'],\n",
" },\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 22
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| vendor/cloud.google.com/go/LICENSE | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.0001751429954310879,
0.0001714250392979011,
0.00016587784921284765,
0.000172574698808603,
0.0000025386427751072915
] |
{
"id": 1,
"code_window": [
" 'baz/biz': {\n",
" capabilities: ['read'],\n",
" },\n",
" },\n",
" },\n",
"};\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" 'ends/in/slash/': {\n",
" capabilities: ['list'],\n",
" },\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 22
} | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Bluetooth sockets and messages
package unix
// Bluetooth Protocols
const (
BTPROTO_L2CAP = 0
BTPROTO_HCI = 1
BTPROTO_SCO = 2
BTPROTO_RFCOMM = 3
BTPROTO_BNEP = 4
BTPROTO_CMTP = 5
BTPROTO_HIDP = 6
BTPROTO_AVDTP = 7
)
const (
HCI_CHANNEL_RAW = 0
HCI_CHANNEL_USER = 1
HCI_CHANNEL_MONITOR = 2
HCI_CHANNEL_CONTROL = 3
)
// Socket option levels
const (
SOL_BLUETOOTH = 0x112
SOL_HCI = 0x0
SOL_L2CAP = 0x6
SOL_RFCOMM = 0x12
SOL_SCO = 0x11
)
| vendor/golang.org/x/sys/unix/bluetooth_linux.go | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.001491131610237062,
0.0005430799210444093,
0.00017475700587965548,
0.00025321549037471414,
0.0005499782273545861
] |
{
"id": 1,
"code_window": [
" 'baz/biz': {\n",
" capabilities: ['read'],\n",
" },\n",
" },\n",
" },\n",
"};\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" 'ends/in/slash/': {\n",
" capabilities: ['list'],\n",
" },\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 22
} | package http
import (
"encoding/json"
"reflect"
"testing"
"github.com/hashicorp/vault/vault"
)
func TestSysAudit(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{
"type": "noop",
})
testResponseStatus(t, resp, 204)
resp = testHttpGet(t, token, addr+"/v1/sys/audit")
var actual map[string]interface{}
expected := map[string]interface{}{
"lease_id": "",
"renewable": false,
"lease_duration": json.Number("0"),
"wrap_info": nil,
"warnings": nil,
"auth": nil,
"data": map[string]interface{}{
"noop/": map[string]interface{}{
"path": "noop/",
"type": "noop",
"description": "",
"options": map[string]interface{}{},
"local": false,
},
},
"noop/": map[string]interface{}{
"path": "noop/",
"type": "noop",
"description": "",
"options": map[string]interface{}{},
"local": false,
},
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:\n%#v actual:\n%#v\n", expected, actual)
}
}
func TestSysDisableAudit(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/audit/foo", map[string]interface{}{
"type": "noop",
})
testResponseStatus(t, resp, 204)
resp = testHttpDelete(t, token, addr+"/v1/sys/audit/foo")
testResponseStatus(t, resp, 204)
resp = testHttpGet(t, token, addr+"/v1/sys/audit")
var actual map[string]interface{}
expected := map[string]interface{}{
"lease_id": "",
"renewable": false,
"lease_duration": json.Number("0"),
"wrap_info": nil,
"warnings": nil,
"auth": nil,
"data": map[string]interface{}{},
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad:\nactual: %#v\nexpected: %#v\n", actual, expected)
}
}
func TestSysAuditHash(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
defer ln.Close()
TestServerAuth(t, addr, token)
resp := testHttpPost(t, token, addr+"/v1/sys/audit/noop", map[string]interface{}{
"type": "noop",
})
testResponseStatus(t, resp, 204)
resp = testHttpPost(t, token, addr+"/v1/sys/audit-hash/noop", map[string]interface{}{
"input": "bar",
})
var actual map[string]interface{}
expected := map[string]interface{}{
"lease_id": "",
"renewable": false,
"lease_duration": json.Number("0"),
"wrap_info": nil,
"warnings": nil,
"auth": nil,
"data": map[string]interface{}{
"hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
},
"hash": "hmac-sha256:f9320baf0249169e73850cd6156ded0106e2bb6ad8cab01b7bbbebe6d1065317",
}
testResponseStatus(t, resp, 200)
testResponseBody(t, resp, &actual)
expected["request_id"] = actual["request_id"]
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("bad: expected:\n%#v\n, got:\n%#v\n", expected, actual)
}
}
| http/sys_audit_test.go | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.0001863828074419871,
0.00017025636043399572,
0.00016419529856648296,
0.0001693940139375627,
0.000005464139121613698
] |
{
"id": 2,
"code_window": [
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('boo'), false);\n",
" });\n",
"\n",
" test('it returns false if a policy does not includes access to a path', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" assert.equal(service.hasPermission('danger'), false);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" test('it returns true if passed path does not end in a slash but globPath does', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('ends/in/slash'), true, 'matches without slash');\n",
" assert.equal(service.hasPermission('ends/in/slash/'), true, 'matches with slash');\n",
" });\n",
"\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 78
} | import Service, { inject as service } from '@ember/service';
import { task } from 'ember-concurrency';
const API_PATHS = {
secrets: { engine: 'cubbyhole/' },
access: {
methods: 'sys/auth',
entities: 'identity/entities',
groups: 'identity/groups',
leases: 'sys/leases/lookup',
namespaces: 'sys/namespaces',
'control-groups': 'sys/control-group/',
},
policies: {
acl: 'sys/policies/acl',
rgp: 'sys/policies/rgp',
egp: 'sys/policies/egp',
},
tools: {
wrap: 'sys/wrapping/wrap',
lookup: 'sys/wrapping/lookup',
unwrap: 'sys/wrapping/unwrap',
rewrap: 'sys/wrapping/rewrap',
random: 'sys/tools/random',
hash: 'sys/tools/hash',
},
status: {
replication: 'sys/replication',
license: 'sys/license',
seal: 'sys/seal',
},
};
const API_PATHS_TO_ROUTE_PARAMS = {
'sys/auth': ['vault.cluster.access.methods'],
'identity/entities': ['vault.cluster.access.identity', 'entities'],
'identity/groups': ['vault.cluster.access.identity', 'groups'],
'sys/leases/lookup': ['vault.cluster.access.leases'],
'sys/namespaces': ['vault.cluster.access.namespaces'],
'sys/control-group/': ['vault.cluster.access.control-groups'],
};
/*
The Permissions service is used to gate top navigation and sidebar items. It fetches
a users' policy from the resultant-acl endpoint and stores their allowed exact and glob
paths as state. It also has methods for checking whether a user has permission for a given
path.
*/
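// Illustrative usage (a sketch, based on the methods below):
//   permissions.hasPermission('sys/auth');            // any capability suffices
//   permissions.hasPermission('sys/auth', ['read']);  // require a specific capability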
export default Service.extend({
exactPaths: null,
globPaths: null,
canViewAll: null,
store: service(),
auth: service(),
namespace: service(),
getPaths: task(function*() {
if (this.paths) {
return;
}
try {
let resp = yield this.get('store')
.adapterFor('permissions')
.query();
this.setPaths(resp);
return;
} catch (err) {
// If no policy can be found, default to showing all nav items.
this.set('canViewAll', true);
}
}),
setPaths(resp) {
this.set('exactPaths', resp.data.exact_paths);
this.set('globPaths', resp.data.glob_paths);
this.set('canViewAll', resp.data.root);
},
reset() {
this.set('exactPaths', null);
this.set('globPaths', null);
this.set('canViewAll', null);
},
hasNavPermission(navItem, routeParams) {
if (routeParams) {
return this.hasPermission(API_PATHS[navItem][routeParams]);
}
return Object.values(API_PATHS[navItem]).some(path => this.hasPermission(path));
},
navPathParams(navItem) {
const path = Object.values(API_PATHS[navItem]).find(path => this.hasPermission(path));
if (['policies', 'tools'].includes(navItem)) {
return path.split('/').lastObject;
}
return API_PATHS_TO_ROUTE_PARAMS[path];
},
pathNameWithNamespace(pathName) {
const namespace = this.get('namespace').path;
if (namespace) {
return `${namespace}/${pathName}`;
} else {
return pathName;
}
},
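  // Note: `capabilities` defaults to [null]; a null capability makes the
  // matchers below skip the capability check, so any non-denied matching
  // path is sufficient.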
hasPermission(pathName, capabilities = [null]) {
const path = this.pathNameWithNamespace(pathName);
if (this.canViewAll) {
return true;
}
return capabilities.every(
capability => this.hasMatchingExactPath(path, capability) || this.hasMatchingGlobPath(path, capability)
);
},
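  // An exact path matches when one of the allowed exact paths starts with the
  // requested path, and that path's capabilities do not include 'deny'.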
hasMatchingExactPath(pathName, capability) {
const exactPaths = this.get('exactPaths');
if (exactPaths) {
const prefix = Object.keys(exactPaths).find(path => path.startsWith(pathName));
const hasMatchingPath = prefix && !this.isDenied(exactPaths[prefix]);
if (prefix && capability) {
return this.hasCapability(exactPaths[prefix], capability) && hasMatchingPath;
}
return hasMatchingPath;
}
return false;
},
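  // A glob path matches when the requested path contains it as a substring;
  // an empty-string glob ('') matches every path.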
hasMatchingGlobPath(pathName, capability) {
const globPaths = this.get('globPaths');
if (globPaths) {
const matchingPath = Object.keys(globPaths).find(k => pathName.includes(k));
const hasMatchingPath =
(matchingPath && !this.isDenied(globPaths[matchingPath])) || globPaths.hasOwnProperty('');
if (matchingPath && capability) {
return this.hasCapability(globPaths[matchingPath], capability) && hasMatchingPath;
}
return hasMatchingPath;
}
return false;
},
hasCapability(path, capability) {
return path.capabilities.includes(capability);
},
isDenied(path) {
return path.capabilities.includes('deny');
},
});
| ui/app/services/permissions.js | 1 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.012141318060457706,
0.0015367366140708327,
0.0001632189378142357,
0.00021301301603671163,
0.0033673788420856
] |
{
"id": 2,
"code_window": [
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('boo'), false);\n",
" });\n",
"\n",
" test('it returns false if a policy does not includes access to a path', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" assert.equal(service.hasPermission('danger'), false);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" test('it returns true if passed path does not end in a slash but globPath does', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('ends/in/slash'), true, 'matches without slash');\n",
" assert.equal(service.hasPermission('ends/in/slash/'), true, 'matches with slash');\n",
" });\n",
"\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 78
} | ---
layout: "docs"
page_title: "MSSQL - Database - Secrets Engines"
sidebar_title: "MSSQL"
sidebar_current: "docs-secrets-databases-mssql"
description: |-
MSSQL is one of the supported plugins for the database secrets engine. This
plugin generates database credentials dynamically based on configured roles
for the MSSQL database.
---
# MSSQL Database Secrets Engine
MSSQL is one of the supported plugins for the database secrets engine. This
plugin generates database credentials dynamically based on configured roles for
the MSSQL database.
See the [database secrets engine](/docs/secrets/databases/index.html) docs for
more information about setting up the database secrets engine.
## Setup
1. Enable the database secrets engine if it is not already enabled:
```text
$ vault secrets enable database
Success! Enabled the database secrets engine at: database/
```
By default, the secrets engine will enable at the name of the engine. To
enable the secrets engine at a different path, use the `-path` argument.
1. Configure Vault with the proper plugin and connection information:
```text
$ vault write database/config/my-mssql-database \
plugin_name=mssql-database-plugin \
connection_url='sqlserver://{{username}}:{{password}}@localhost:1433' \
allowed_roles="my-role" \
username="sa" \
password="yourStrong(!)Password"
```
In this case, we've configured Vault with the user "sa" and password
"yourStrong(!)Password", connecting to an instance at "localhost" on port 1433. It is
not necessary that Vault has the sa login, but the user must have privileges
to create logins and manage processes. The fixed server roles
`securityadmin` and `processadmin` are examples of built-in roles that grant
these permissions. The user also must have privileges to create database
users and grant permissions in the databases that Vault manages. The fixed
database roles `db_accessadmin` and `db_securityadmin` are examples of
built-in roles that grant these permissions.
1. Configure a role that maps a name in Vault to an SQL statement to execute to
create the database credential:
```text
$ vault write database/roles/my-role \
db_name=my-mssql-database \
creation_statements="CREATE LOGIN [{{name}}] WITH PASSWORD = '{{password}}';\
CREATE USER [{{name}}] FOR LOGIN [{{name}}];\
GRANT SELECT ON SCHEMA::dbo TO [{{name}}];" \
default_ttl="1h" \
max_ttl="24h"
Success! Data written to: database/roles/my-role
```
## Usage
After the secrets engine is configured and a user/machine has a Vault token with
the proper permission, it can generate credentials.
1. Generate a new credential by reading from the `/creds` endpoint with the name
of the role:
```text
$ vault read database/creds/my-role
Key Value
--- -----
lease_id database/creds/my-role/2f6a614c-4aa2-7b19-24b9-ad944a8d4de6
lease_duration 1h
lease_renewable true
password 8cab931c-d62e-a73d-60d3-5ee85139cd66
username v-root-e2978cd0-
```
## Example for Azure SQL Database
Here is a complete example using Azure SQL Database. Note that databases in Azure SQL Database are [contained databases](https://docs.microsoft.com/en-us/sql/relational-databases/databases/contained-databases) and that we do not create a login for the user; instead, we associate the password directly with the user itself. Also note that you will need a separate connection and role for each Azure SQL database for which you want to generate dynamic credentials. You can use a single database backend mount for all these databases or use a separate mount for each of them. In this example, we use a custom path for the database backend.
First, we mount a database backend at the azuresql path with `vault secrets enable -path=azuresql database`. Then we configure a connection called "testvault" to connect to a database called "test-vault", using "azuresql" at the beginning of our path:
```
$ vault write azuresql/config/testvault \
plugin_name=mssql-database-plugin \
connection_url='server=hashisqlserver.database.windows.net;port=1433; \
user id=admin;password=pAssw0rd;database=test-vault;app name=vault;' \
allowed_roles="test"
```
Now we add a role called "test" for use with the "testvault" connection:
```
$ vault write azuresql/roles/test \
db_name=testvault \
creation_statements="CREATE USER [{{name}}] WITH PASSWORD = '{{password}}';" \
revocation_statements="DROP USER IF EXISTS [{{name}}]" \
default_ttl="1h" \
max_ttl="24h"
```
We can now use this role to dynamically generate credentials for the Azure SQL database, test-vault:
```
$ vault read azuresql/creds/test
Key Value
--- -----
lease_id azuresql/creds/test/2e5b1e0b-a081-c7e1-5622-39f58e79a719
lease_duration 1h0m0s
lease_renewable true
password A1a-48w04t1xzw1s33z3
username v-token-test-tr2t4x9pxvq1z8878s9s-1513446795
```
When we no longer need the backend, we can disable it with `vault secrets disable azuresql`. Now, you can use the MSSQL Database Plugin with your Azure SQL databases.
## Amazon RDS
The MSSQL plugin supports databases running on [Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html),
but there are differences that need to be accommodated. A key limitation is that Amazon RDS doesn't support
the "sysadmin" role, which is used by default during Vault's revocation process for MSSQL. The workaround
is to add custom revocation statements to roles, for example:
```
vault write database/roles/my-role revocation_statements="\
USE my_database; \
IF EXISTS \
(SELECT name \
FROM sys.database_principals \
WHERE name = N'{{name}}') \
BEGIN \
DROP USER [{{name}}] \
END \
IF EXISTS \
(SELECT name \
FROM master.sys.server_principals \
WHERE name = N'{{name}}') \
BEGIN \
DROP LOGIN [{{name}}] \
END"
```
## API
The full list of configurable options can be seen in the [MSSQL database
plugin API](/api/secret/databases/mssql.html) page.
For more information on the database secrets engine's HTTP API please see the
[Database secrets engine API](/api/secret/databases/index.html) page.
| website/source/docs/secrets/databases/mssql.html.md | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.00017412344459444284,
0.00016654992941766977,
0.00015993465785868466,
0.00016694159421604127,
0.000003849638687825063
] |
{
"id": 2,
"code_window": [
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('boo'), false);\n",
" });\n",
"\n",
" test('it returns false if a policy does not includes access to a path', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" assert.equal(service.hasPermission('danger'), false);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" test('it returns true if passed path does not end in a slash but globPath does', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('ends/in/slash'), true, 'matches without slash');\n",
" assert.equal(service.hasPermission('ends/in/slash/'), true, 'matches with slash');\n",
" });\n",
"\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 78
} | // Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package openpgp implements high level operations on OpenPGP messages.
package openpgp // import "github.com/keybase/go-crypto/openpgp"
import (
"crypto"
"crypto/hmac"
_ "crypto/sha256"
"hash"
"io"
"strconv"
"github.com/keybase/go-crypto/openpgp/armor"
"github.com/keybase/go-crypto/openpgp/errors"
"github.com/keybase/go-crypto/openpgp/packet"
)
// SignatureType is the armor type for a PGP signature.
var SignatureType = "PGP SIGNATURE"
// readArmored reads an armored block with the given type.
func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
block, err := armor.Decode(r)
if err != nil {
return
}
if block.Type != expectedType {
return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
}
return block.Body, nil
}
// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
// signed message.
type MessageDetails struct {
IsEncrypted bool // true if the message was encrypted.
EncryptedToKeyIds []uint64 // the list of recipient key ids.
IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
DecryptedWith Key // the private key used to decrypt the message, if any.
IsSigned bool // true if the message is signed.
SignedByKeyId uint64 // the key id of the signer, if any.
SignedBy *Key // the key of the signer, if available.
LiteralData *packet.LiteralData // the metadata of the contents
UnverifiedBody io.Reader // the contents of the message.
// If IsSigned is true and SignedBy is non-zero then the signature will
// be verified as UnverifiedBody is read. The signature cannot be
// checked until the whole of UnverifiedBody is read so UnverifiedBody
// must be consumed until EOF before the data can be trusted. Even if a
// message isn't signed (or the signer is unknown) the data may contain
// an authentication code that is only checked once UnverifiedBody has
// been consumed. Once EOF has been seen, the following fields are
// valid. (An authentication code failure is reported as a
// SignatureError error when reading from UnverifiedBody.)
SignatureError error // nil if the signature is good.
Signature *packet.Signature // the signature packet itself, if v4 (default)
SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature
// Does the Message include multiple signatures? Also called "nested signatures".
MultiSig bool
decrypted io.ReadCloser
}
// A PromptFunction is used as a callback by functions that may need to decrypt
// a private key, or prompt for a passphrase. It is called with a list of
// acceptable, encrypted private keys and a boolean that indicates whether a
// passphrase is usable. It should either decrypt a private key or return a
// passphrase to try. If the decrypted private key or given passphrase isn't
// correct, the function will be called again, forever. Any error returned will
// be passed up.
type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
// A keyEnvelopePair is used to store a private key with the envelope that
// contains a symmetric key, encrypted with that key.
type keyEnvelopePair struct {
key Key
encryptedKey *packet.EncryptedKey
}
// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
// The given KeyRing should contain both public keys (for signature
// verification) and, possibly encrypted, private keys for decrypting.
// If config is nil, sensible defaults will be used.
func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
var p packet.Packet
var symKeys []*packet.SymmetricKeyEncrypted
var pubKeys []keyEnvelopePair
var se *packet.SymmetricallyEncrypted
packets := packet.NewReader(r)
md = new(MessageDetails)
md.IsEncrypted = true
// The message, if encrypted, starts with a number of packets
// containing an encrypted decryption key. The decryption key is either
// encrypted to a public key, or with a passphrase. This loop
// collects these packets.
ParsePackets:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.SymmetricKeyEncrypted:
// This packet contains the decryption key encrypted with a passphrase.
md.IsSymmetricallyEncrypted = true
symKeys = append(symKeys, p)
case *packet.EncryptedKey:
// This packet contains the decryption key encrypted to a public key.
md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
switch p.Algo {
case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH:
break
default:
continue
}
var keys []Key
if p.KeyId == 0 {
keys = keyring.DecryptionKeys()
} else {
keys = keyring.KeysById(p.KeyId, nil)
}
for _, k := range keys {
pubKeys = append(pubKeys, keyEnvelopePair{k, p})
}
case *packet.SymmetricallyEncrypted:
se = p
break ParsePackets
case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
// This message isn't encrypted.
if len(symKeys) != 0 || len(pubKeys) != 0 {
return nil, errors.StructuralError("key material not followed by encrypted message")
}
packets.Unread(p)
return readSignedMessage(packets, nil, keyring)
}
}
var candidates []Key
var decrypted io.ReadCloser
// Now that we have the list of encrypted keys we need to decrypt at
// least one of them or, if we cannot, we need to call the prompt
// function so that it can decrypt a key or give us a passphrase.
FindKey:
for {
// See if any of the keys already have a private key available
candidates = candidates[:0]
candidateFingerprints := make(map[string]bool)
for _, pk := range pubKeys {
if pk.key.PrivateKey == nil {
continue
}
if !pk.key.PrivateKey.Encrypted {
if len(pk.encryptedKey.Key) == 0 {
pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
}
if len(pk.encryptedKey.Key) == 0 {
continue
}
decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
md.DecryptedWith = pk.key
break FindKey
}
} else {
fpr := string(pk.key.PublicKey.Fingerprint[:])
if v := candidateFingerprints[fpr]; v {
continue
}
candidates = append(candidates, pk.key)
candidateFingerprints[fpr] = true
}
}
if len(candidates) == 0 && len(symKeys) == 0 {
return nil, errors.ErrKeyIncorrect
}
if prompt == nil {
return nil, errors.ErrKeyIncorrect
}
passphrase, err := prompt(candidates, len(symKeys) != 0)
if err != nil {
return nil, err
}
// Try the symmetric passphrase first
if len(symKeys) != 0 && passphrase != nil {
for _, s := range symKeys {
key, cipherFunc, err := s.Decrypt(passphrase)
if err == nil {
decrypted, err = se.Decrypt(cipherFunc, key)
if err != nil && err != errors.ErrKeyIncorrect {
return nil, err
}
if decrypted != nil {
break FindKey
}
}
}
}
}
md.decrypted = decrypted
if err := packets.Push(decrypted); err != nil {
return nil, err
}
return readSignedMessage(packets, md, keyring)
}
// readSignedMessage reads a possibly signed message. If mdin is non-nil, that
// structure is updated and returned; otherwise a fresh MessageDetails is used.
func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) {
if mdin == nil {
mdin = new(MessageDetails)
}
md = mdin
var p packet.Packet
var h hash.Hash
var wrappedHash hash.Hash
FindLiteralData:
for {
p, err = packets.Next()
if err != nil {
return nil, err
}
switch p := p.(type) {
case *packet.Compressed:
if err := packets.Push(p.Body); err != nil {
return nil, err
}
case *packet.OnePassSignature:
if md.IsSigned {
// If IsSigned is set, it means we have multiple
// OnePassSignature packets.
md.MultiSig = true
if md.SignedBy != nil {
// We've already found the signature we were looking
// for, made by key that we had in keyring and can
// check signature against. Continue with that instead
// of trying to find another.
continue FindLiteralData
}
}
h, wrappedHash, err = hashForSignature(p.Hash, p.SigType)
if err != nil {
md = nil
return
}
md.IsSigned = true
md.SignedByKeyId = p.KeyId
keys := keyring.KeysByIdUsage(p.KeyId, nil, packet.KeyFlagSign)
if len(keys) > 0 {
md.SignedBy = &keys[0]
}
case *packet.LiteralData:
md.LiteralData = p
break FindLiteralData
}
}
if md.SignedBy != nil {
md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md}
} else if md.decrypted != nil {
md.UnverifiedBody = checkReader{md}
} else {
md.UnverifiedBody = md.LiteralData.Body
}
return md, nil
}
// hashForSignature returns a pair of hashes that can be used to verify a
// signature. The signature may specify that the contents of the signed message
// should be preprocessed (i.e. to normalize line endings). Thus this function
// returns two hashes. The second should be used to hash the message itself and
// performs any needed preprocessing.
func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) {
if !hashId.Available() {
return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId)))
}
h := hashId.New()
switch sigType {
case packet.SigTypeBinary:
return h, h, nil
case packet.SigTypeText:
return h, NewCanonicalTextHash(h), nil
}
return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
}
// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
// MDC checks.
type checkReader struct {
md *MessageDetails
}
func (cr checkReader) Read(buf []byte) (n int, err error) {
n, err = cr.md.LiteralData.Body.Read(buf)
if err == io.EOF {
mdcErr := cr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
return
}
// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
// the data as it is read. When it sees an EOF from the underlying io.Reader
// it parses and checks a trailing Signature packet and triggers any MDC checks.
type signatureCheckReader struct {
packets *packet.Reader
h, wrappedHash hash.Hash
md *MessageDetails
}
func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) {
n, err = scr.md.LiteralData.Body.Read(buf)
scr.wrappedHash.Write(buf[:n])
if err == io.EOF {
for {
var p packet.Packet
p, scr.md.SignatureError = scr.packets.Next()
if scr.md.SignatureError != nil {
if scr.md.MultiSig {
// If we are in MultiSig, we might have found other
// signature that cannot be verified using our key.
// Clear Signature field so it's clear for consumers
// that this message failed to verify.
scr.md.Signature = nil
}
return
}
var ok bool
if scr.md.Signature, ok = p.(*packet.Signature); ok {
var err error
if keyID := scr.md.Signature.IssuerKeyId; keyID != nil {
if *keyID != scr.md.SignedBy.PublicKey.KeyId {
if scr.md.MultiSig {
continue // try again to find a sig we can verify
}
err = errors.StructuralError("bad key id")
}
}
if fingerprint := scr.md.Signature.IssuerFingerprint; fingerprint != nil {
if !hmac.Equal(fingerprint, scr.md.SignedBy.PublicKey.Fingerprint[:]) {
if scr.md.MultiSig {
continue // try again to find a sig we can verify
}
err = errors.StructuralError("bad key fingerprint")
}
}
if err == nil {
err = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature)
}
scr.md.SignatureError = err
} else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok {
scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3)
} else {
scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature")
return
}
// Parse only one packet by default, unless message is MultiSig. Then
// we ask for more packets after discovering non-matching signature,
// until we find one that we can verify.
break
}
// The SymmetricallyEncrypted packet, if any, might have an
// unsigned hash of its own. In order to check this we need to
// close that Reader.
if scr.md.decrypted != nil {
mdcErr := scr.md.decrypted.Close()
if mdcErr != nil {
err = mdcErr
}
}
}
return
}
// CheckDetachedSignature takes a signed file and a detached signature and
// returns the signer if the signature is valid. If the signer isn't known,
// ErrUnknownIssuer is returned.
func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
signer, _, err = checkDetachedSignature(keyring, signed, signature)
return signer, err
}
func checkDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) {
var issuerKeyId uint64
var issuerFingerprint []byte
var hashFunc crypto.Hash
var sigType packet.SignatureType
var keys []Key
var p packet.Packet
packets := packet.NewReader(signature)
for {
p, err = packets.Next()
if err == io.EOF {
return nil, nil, errors.ErrUnknownIssuer
}
if err != nil {
return nil, nil, err
}
switch sig := p.(type) {
case *packet.Signature:
if sig.IssuerKeyId == nil {
return nil, nil, errors.StructuralError("signature doesn't have an issuer")
}
issuerKeyId = *sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
issuerFingerprint = sig.IssuerFingerprint
case *packet.SignatureV3:
issuerKeyId = sig.IssuerKeyId
hashFunc = sig.Hash
sigType = sig.SigType
default:
return nil, nil, errors.StructuralError("non signature packet found")
}
keys = keyring.KeysByIdUsage(issuerKeyId, issuerFingerprint, packet.KeyFlagSign)
if len(keys) > 0 {
break
}
}
if len(keys) == 0 {
panic("unreachable")
}
h, wrappedHash, err := hashForSignature(hashFunc, sigType)
if err != nil {
return nil, nil, err
}
if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
return nil, nil, err
}
for _, key := range keys {
switch sig := p.(type) {
case *packet.Signature:
err = key.PublicKey.VerifySignature(h, sig)
case *packet.SignatureV3:
err = key.PublicKey.VerifySignatureV3(h, sig)
default:
panic("unreachable")
}
if err == nil {
return key.Entity, &issuerKeyId, nil
}
}
return nil, nil, err
}
// CheckArmoredDetachedSignature performs the same actions as
// CheckDetachedSignature but expects the signature to be armored.
func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) {
signer, _, err = checkArmoredDetachedSignature(keyring, signed, signature)
return signer, err
}
func checkArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, issuer *uint64, err error) {
body, err := readArmored(signature, SignatureType)
if err != nil {
return
}
return checkDetachedSignature(keyring, signed, body)
}
| vendor/github.com/keybase/go-crypto/openpgp/read.go | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.0007903387886472046,
0.00018347725563216954,
0.00016249560576397926,
0.0001727979542920366,
0.00008590942161390558
] |
{
"id": 2,
"code_window": [
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('boo'), false);\n",
" });\n",
"\n",
" test('it returns false if a policy does not includes access to a path', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" assert.equal(service.hasPermission('danger'), false);\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
" test('it returns true if passed path does not end in a slash but globPath does', function(assert) {\n",
" let service = this.owner.lookup('service:permissions');\n",
" service.set('globPaths', PERMISSIONS_RESPONSE.data.glob_paths);\n",
" assert.equal(service.hasPermission('ends/in/slash'), true, 'matches without slash');\n",
" assert.equal(service.hasPermission('ends/in/slash/'), true, 'matches with slash');\n",
" });\n",
"\n"
],
"file_path": "ui/tests/unit/services/permissions-test.js",
"type": "add",
"edit_start_line_idx": 78
} | // Copyright 2009,2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// FreeBSD system calls.
// This file is compiled as ordinary Go code,
// but it is also input to mksyscall,
// which parses the //sys lines and generates system call stubs.
// Note that sometimes we use a lowercase //sys name and wrap
// it in our own nicer implementation, either here or in
// syscall_bsd.go or syscall_unix.go.
package unix
import (
"unsafe"
)
// SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
type SockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [46]int8
raw RawSockaddrDatalink
}
// Translate "kern.hostname" to []_C_int{0,1,2,3}.
func nametomib(name string) (mib []_C_int, err error) {
const siz = unsafe.Sizeof(mib[0])
// NOTE(rsc): It seems strange to set the buffer to have
// size CTL_MAXNAME+2 but use only CTL_MAXNAME
// as the size. I don't know why the +2 is here, but the
// kernel uses +2 for its own implementation of this function.
// I am scared that if we don't include the +2 here, the kernel
// will silently write 2 words farther than we specify
// and we'll get memory corruption.
var buf [CTL_MAXNAME + 2]_C_int
n := uintptr(CTL_MAXNAME) * siz
p := (*byte)(unsafe.Pointer(&buf[0]))
bytes, err := ByteSliceFromString(name)
if err != nil {
return nil, err
}
// Magic sysctl: "setting" 0.3 to a string name
// lets you read back the array of integers form.
if err = sysctl([]_C_int{0, 3}, p, &n, &bytes[0], uintptr(len(name))); err != nil {
return nil, err
}
return buf[0 : n/siz], nil
}
func Pipe(p []int) (err error) {
return Pipe2(p, 0)
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) error {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err := pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return err
}
func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
var value IPMreqn
vallen := _Socklen(SizeofIPMreqn)
errno := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
return &value, errno
}
func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
}
func Accept4(fd, flags int) (nfd int, sa Sockaddr, err error) {
var rsa RawSockaddrAny
var len _Socklen = SizeofSockaddrAny
nfd, err = accept4(fd, &rsa, &len, flags)
if err != nil {
return
}
if len > SizeofSockaddrAny {
panic("RawSockaddrAny too small")
}
sa, err = anyToSockaddr(fd, &rsa)
if err != nil {
Close(nfd)
nfd = 0
}
return
}
const ImplementsGetwd = true
//sys Getcwd(buf []byte) (n int, err error) = SYS___GETCWD
func Getwd() (string, error) {
var buf [PathMax]byte
_, err := Getcwd(buf[0:])
if err != nil {
return "", err
}
n := clen(buf[:])
if n < 1 {
return "", EINVAL
}
return string(buf[:n]), nil
}
func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
var _p0 unsafe.Pointer
var bufsize uintptr
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
}
r0, _, e1 := Syscall(SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
n = int(r0)
if e1 != 0 {
err = e1
}
return
}
func setattrlistTimes(path string, times []Timespec, flags int) error {
// used on Darwin for UtimesNano
return ENOSYS
}
//sys ioctl(fd int, req uint, arg uintptr) (err error)
// ioctl itself should not be exposed directly, but additional get/set
// functions for specific types are permissible.
// IoctlSetInt performs an ioctl operation which sets an integer value
// on fd, using the specified request number.
func IoctlSetInt(fd int, req uint, value int) error {
return ioctl(fd, req, uintptr(value))
}
func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
func ioctlSetTermios(fd int, req uint, value *Termios) error {
return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
}
// IoctlGetInt performs an ioctl operation which gets an integer value
// from fd, using the specified request number.
func IoctlGetInt(fd int, req uint) (int, error) {
var value int
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
var value Winsize
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return &value, err
}
func IoctlGetTermios(fd int, req uint) (*Termios, error) {
var value Termios
err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
return &value, err
}
func Uname(uname *Utsname) error {
mib := []_C_int{CTL_KERN, KERN_OSTYPE}
n := unsafe.Sizeof(uname.Sysname)
if err := sysctl(mib, &uname.Sysname[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_HOSTNAME}
n = unsafe.Sizeof(uname.Nodename)
if err := sysctl(mib, &uname.Nodename[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_OSRELEASE}
n = unsafe.Sizeof(uname.Release)
if err := sysctl(mib, &uname.Release[0], &n, nil, 0); err != nil {
return err
}
mib = []_C_int{CTL_KERN, KERN_VERSION}
n = unsafe.Sizeof(uname.Version)
if err := sysctl(mib, &uname.Version[0], &n, nil, 0); err != nil {
return err
}
// The version might have newlines or tabs in it, convert them to
// spaces.
for i, b := range uname.Version {
if b == '\n' || b == '\t' {
if i == len(uname.Version)-1 {
uname.Version[i] = 0
} else {
uname.Version[i] = ' '
}
}
}
mib = []_C_int{CTL_HW, HW_MACHINE}
n = unsafe.Sizeof(uname.Machine)
if err := sysctl(mib, &uname.Machine[0], &n, nil, 0); err != nil {
return err
}
return nil
}
/*
* Exposed directly
*/
//sys Access(path string, mode uint32) (err error)
//sys Adjtime(delta *Timeval, olddelta *Timeval) (err error)
//sys CapEnter() (err error)
//sys capRightsGet(version int, fd int, rightsp *CapRights) (err error) = SYS___CAP_RIGHTS_GET
//sys capRightsLimit(fd int, rightsp *CapRights) (err error)
//sys Chdir(path string) (err error)
//sys Chflags(path string, flags int) (err error)
//sys Chmod(path string, mode uint32) (err error)
//sys Chown(path string, uid int, gid int) (err error)
//sys Chroot(path string) (err error)
//sys Close(fd int) (err error)
//sys Dup(fd int) (nfd int, err error)
//sys Dup2(from int, to int) (err error)
//sys Exit(code int)
//sys ExtattrGetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrSetFd(fd int, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrDeleteFd(fd int, attrnamespace int, attrname string) (err error)
//sys ExtattrListFd(fd int, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrGetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrSetFile(file string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrDeleteFile(file string, attrnamespace int, attrname string) (err error)
//sys ExtattrListFile(file string, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrGetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrSetLink(link string, attrnamespace int, attrname string, data uintptr, nbytes int) (ret int, err error)
//sys ExtattrDeleteLink(link string, attrnamespace int, attrname string) (err error)
//sys ExtattrListLink(link string, attrnamespace int, data uintptr, nbytes int) (ret int, err error)
//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE
//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error)
//sys Fchdir(fd int) (err error)
//sys Fchflags(fd int, flags int) (err error)
//sys Fchmod(fd int, mode uint32) (err error)
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
//sys Fchown(fd int, uid int, gid int) (err error)
//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
//sys Flock(fd int, how int) (err error)
//sys Fpathconf(fd int, name int) (val int, err error)
//sys Fstat(fd int, stat *Stat_t) (err error)
//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error)
//sys Fstatfs(fd int, stat *Statfs_t) (err error)
//sys Fsync(fd int) (err error)
//sys Ftruncate(fd int, length int64) (err error)
//sys Getdents(fd int, buf []byte) (n int, err error)
//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error)
//sys Getdtablesize() (size int)
//sysnb Getegid() (egid int)
//sysnb Geteuid() (uid int)
//sysnb Getgid() (gid int)
//sysnb Getpgid(pid int) (pgid int, err error)
//sysnb Getpgrp() (pgrp int)
//sysnb Getpid() (pid int)
//sysnb Getppid() (ppid int)
//sys Getpriority(which int, who int) (prio int, err error)
//sysnb Getrlimit(which int, lim *Rlimit) (err error)
//sysnb Getrusage(who int, rusage *Rusage) (err error)
//sysnb Getsid(pid int) (sid int, err error)
//sysnb Gettimeofday(tv *Timeval) (err error)
//sysnb Getuid() (uid int)
//sys Issetugid() (tainted bool)
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Kqueue() (fd int, err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
//sys Linkat(pathfd int, path string, linkfd int, link string, flags int) (err error)
//sys Listen(s int, backlog int) (err error)
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Mkdir(path string, mode uint32) (err error)
//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
//sys Mkfifo(path string, mode uint32) (err error)
//sys Mknod(path string, mode uint32, dev int) (err error)
//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
//sys Open(path string, mode int, perm uint32) (fd int, err error)
//sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error)
//sys Pathconf(path string, name int) (val int, err error)
//sys Pread(fd int, p []byte, offset int64) (n int, err error)
//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
//sys read(fd int, p []byte) (n int, err error)
//sys Readlink(path string, buf []byte) (n int, err error)
//sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)
//sys Rename(from string, to string) (err error)
//sys Renameat(fromfd int, from string, tofd int, to string) (err error)
//sys Revoke(path string) (err error)
//sys Rmdir(path string) (err error)
//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
//sysnb Setegid(egid int) (err error)
//sysnb Seteuid(euid int) (err error)
//sysnb Setgid(gid int) (err error)
//sys Setlogin(name string) (err error)
//sysnb Setpgid(pid int, pgid int) (err error)
//sys Setpriority(which int, who int, prio int) (err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(which int, lim *Rlimit) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tp *Timeval) (err error)
//sysnb Setuid(uid int) (err error)
//sys Stat(path string, stat *Stat_t) (err error)
//sys Statfs(path string, stat *Statfs_t) (err error)
//sys Symlink(path string, link string) (err error)
//sys Symlinkat(oldpath string, newdirfd int, newpath string) (err error)
//sys Sync() (err error)
//sys Truncate(path string, length int64) (err error)
//sys Umask(newmask int) (oldmask int)
//sys Undelete(path string) (err error)
//sys Unlink(path string) (err error)
//sys Unlinkat(dirfd int, path string, flags int) (err error)
//sys Unmount(path string, flags int) (err error)
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ
//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE
//sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error)
//sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
/*
* Unimplemented
*/
// Profil
// Sigaction
// Sigprocmask
// Getlogin
// Sigpending
// Sigaltstack
// Ioctl
// Reboot
// Execve
// Vfork
// Sbrk
// Sstk
// Ovadvise
// Mincore
// Setitimer
// Swapon
// Select
// Sigsuspend
// Readv
// Writev
// Nfssvc
// Getfh
// Quotactl
// Mount
// Csops
// Waitid
// Add_profil
// Kdebug_trace
// Sigreturn
// Atsocket
// Kqueue_from_portset_np
// Kqueue_portset
// Getattrlist
// Setattrlist
// Getdirentriesattr
// Searchfs
// Delete
// Copyfile
// Watchevent
// Waitevent
// Modwatch
// Fsctl
// Initgroups
// Posix_spawn
// Nfsclnt
// Fhopen
// Minherit
// Semsys
// Msgsys
// Shmsys
// Semctl
// Semget
// Semop
// Msgctl
// Msgget
// Msgsnd
// Msgrcv
// Shmat
// Shmctl
// Shmdt
// Shmget
// Shm_open
// Shm_unlink
// Sem_open
// Sem_close
// Sem_unlink
// Sem_wait
// Sem_trywait
// Sem_post
// Sem_getvalue
// Sem_init
// Sem_destroy
// Open_extended
// Umask_extended
// Stat_extended
// Lstat_extended
// Fstat_extended
// Chmod_extended
// Fchmod_extended
// Access_extended
// Settid
// Gettid
// Setsgroups
// Getsgroups
// Setwgroups
// Getwgroups
// Mkfifo_extended
// Mkdir_extended
// Identitysvc
// Shared_region_check_np
// Shared_region_map_np
// __pthread_mutex_destroy
// __pthread_mutex_init
// __pthread_mutex_lock
// __pthread_mutex_trylock
// __pthread_mutex_unlock
// __pthread_cond_init
// __pthread_cond_destroy
// __pthread_cond_broadcast
// __pthread_cond_signal
// Setsid_with_pid
// __pthread_cond_timedwait
// Aio_fsync
// Aio_return
// Aio_suspend
// Aio_cancel
// Aio_error
// Aio_read
// Aio_write
// Lio_listio
// __pthread_cond_wait
// Iopolicysys
// __pthread_kill
// __pthread_sigmask
// __sigwait
// __disable_threadsignal
// __pthread_markcancel
// __pthread_canceled
// __semwait_signal
// Proc_info
// Stat64_extended
// Lstat64_extended
// Fstat64_extended
// __pthread_chdir
// __pthread_fchdir
// Audit
// Auditon
// Getauid
// Setauid
// Getaudit
// Setaudit
// Getaudit_addr
// Setaudit_addr
// Auditctl
// Bsdthread_create
// Bsdthread_terminate
// Stack_snapshot
// Bsdthread_register
// Workq_open
// Workq_ops
// __mac_execve
// __mac_syscall
// __mac_get_file
// __mac_set_file
// __mac_get_link
// __mac_set_link
// __mac_get_proc
// __mac_set_proc
// __mac_get_fd
// __mac_set_fd
// __mac_get_pid
// __mac_get_lcid
// __mac_get_lctx
// __mac_set_lctx
// Setlcid
// Read_nocancel
// Write_nocancel
// Open_nocancel
// Close_nocancel
// Wait4_nocancel
// Recvmsg_nocancel
// Sendmsg_nocancel
// Recvfrom_nocancel
// Accept_nocancel
// Fcntl_nocancel
// Select_nocancel
// Fsync_nocancel
// Connect_nocancel
// Sigsuspend_nocancel
// Readv_nocancel
// Writev_nocancel
// Sendto_nocancel
// Pread_nocancel
// Pwrite_nocancel
// Waitid_nocancel
// Poll_nocancel
// Msgsnd_nocancel
// Msgrcv_nocancel
// Sem_wait_nocancel
// Aio_suspend_nocancel
// __sigwait_nocancel
// __semwait_signal_nocancel
// __mac_mount
// __mac_get_mount
// __mac_getfsstat
| vendor/golang.org/x/sys/unix/syscall_freebsd.go | 0 | https://github.com/hashicorp/vault/commit/2c732cea0a5e72bed9cf58af8974746a1f1459cf | [
0.0003735627979040146,
0.00017737498274073005,
0.00016190648602787405,
0.0001727901108097285,
0.00002999323623953387
] |
{
"id": 0,
"code_window": [
"\n",
"\tvar unaryInterceptors []grpc.UnaryClientInterceptor\n",
"\n",
"\tif tracer := ctx.AmbientCtx.Tracer; tracer != nil {\n",
"\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 692
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rpc
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"math"
"net"
"sync"
"sync/atomic"
"time"
circuit "github.com/cockroachdb/circuitbreaker"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/growstack"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/netutil"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"golang.org/x/sync/syncmap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
encodingproto "google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
)
func init() {
// Disable GRPC tracing. This retains a subset of messages for
// display on /debug/requests, which is very expensive for
// snapshots. Until we can be more selective about what is retained
// in traces, we must disable tracing entirely.
// https://github.com/grpc/grpc-go/issues/695
grpc.EnableTracing = false
}
const (
// The coefficient by which the maximum offset is multiplied to determine the
// maximum acceptable measurement latency.
maximumPingDurationMult = 2
)
const (
defaultWindowSize = 65535
initialWindowSize = defaultWindowSize * 32 // for an RPC
initialConnWindowSize = initialWindowSize * 16 // for a connection
)
// sourceAddr is the environment-provided local address for outgoing
// connections.
var sourceAddr = func() net.Addr {
const envKey = "COCKROACH_SOURCE_IP_ADDRESS"
if sourceAddr, ok := envutil.EnvString(envKey, 0); ok {
sourceIP := net.ParseIP(sourceAddr)
if sourceIP == nil {
panic(fmt.Sprintf("unable to parse %s '%s' as IP address", envKey, sourceAddr))
}
return &net.TCPAddr{
IP: sourceIP,
}
}
return nil
}()
var enableRPCCompression = envutil.EnvOrDefaultBool("COCKROACH_ENABLE_RPC_COMPRESSION", true)
// spanInclusionFuncForServer is used as a SpanInclusionFunc for the server-side
// of RPCs, deciding for which operations the gRPC opentracing interceptor should
// create a span.
func spanInclusionFuncForServer(
t *tracing.Tracer, parentSpanCtx opentracing.SpanContext, method string, req, resp interface{},
) bool {
// Is client tracing?
return (parentSpanCtx != nil && !tracing.IsNoopContext(parentSpanCtx)) ||
// Should we trace regardless of the client? This is useful for calls coming
// through the HTTP->RPC gateway (i.e. the AdminUI), where client is never
// tracing.
t.AlwaysTrace()
}
// spanInclusionFuncForClient is used as a SpanInclusionFunc for the client-side
// of RPCs, deciding for which operations the gRPC opentracing interceptor should
// create a span.
func spanInclusionFuncForClient(
parentSpanCtx opentracing.SpanContext, method string, req, resp interface{},
) bool {
return parentSpanCtx != nil && !tracing.IsNoopContext(parentSpanCtx)
}
func requireSuperUser(ctx context.Context) error {
// TODO(marc): grpc's authentication model (which gives credential access in
// the request handler) doesn't really fit with the current design of the
// security package (which assumes that TLS state is only given at connection
// time) - that should be fixed.
if grpcutil.IsLocalRequestContext(ctx) {
// This is an in-process request. Bypass authentication check.
} else if peer, ok := peer.FromContext(ctx); ok {
if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
certUsers, err := security.GetCertificateUsers(&tlsInfo.State)
if err != nil {
return err
}
// TODO(benesch): the vast majority of RPCs should be limited to just
// NodeUser. This is not a security concern, as RootUser has access to
// read and write all data, merely good hygiene. For example, there is
// no reason to permit the root user to send raw Raft RPCs.
if !security.ContainsUser(security.NodeUser, certUsers) &&
!security.ContainsUser(security.RootUser, certUsers) {
return errors.Errorf("user %s is not allowed to perform this RPC", certUsers)
}
}
} else {
return errors.New("internal authentication error: TLSInfo is not available in request context")
}
return nil
}
// NewServer is a thin wrapper around grpc.NewServer that registers a heartbeat
// service.
func NewServer(ctx *Context) *grpc.Server {
return NewServerWithInterceptor(ctx, nil)
}
// NewServerWithInterceptor is like NewServer, but accepts an additional
// interceptor which is called before streaming and unary RPCs and may inject an
// error.
func NewServerWithInterceptor(
ctx *Context, interceptor func(fullMethod string) error,
) *grpc.Server {
opts := []grpc.ServerOption{
// The limiting factor for lowering the max message size is the fact
// that a single large kv can be sent over the network in one message.
// Our maximum kv size is unlimited, so we need this to be very large.
//
// TODO(peter,tamird): need tests before lowering.
grpc.MaxRecvMsgSize(math.MaxInt32),
grpc.MaxSendMsgSize(math.MaxInt32),
// Adjust the stream and connection window sizes. The gRPC defaults are too
// low for high latency connections.
grpc.InitialWindowSize(initialWindowSize),
grpc.InitialConnWindowSize(initialConnWindowSize),
// The default number of concurrent streams/requests on a client connection
// is 100, while the server is unlimited. The client setting can only be
// controlled by adjusting the server value. Set a very large value for the
// server value so that we have no fixed limit on the number of concurrent
// streams/requests on either the client or server.
grpc.MaxConcurrentStreams(math.MaxInt32),
grpc.KeepaliveParams(serverKeepalive),
grpc.KeepaliveEnforcementPolicy(serverEnforcement),
// A stats handler to measure server network stats.
grpc.StatsHandler(&ctx.stats),
}
if !ctx.Insecure {
tlsConfig, err := ctx.GetServerTLSConfig()
if err != nil {
panic(err)
}
opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig)))
}
var unaryInterceptor grpc.UnaryServerInterceptor
var streamInterceptor grpc.StreamServerInterceptor
if tracer := ctx.AmbientCtx.Tracer; tracer != nil {
// We use a SpanInclusionFunc to save a bit of unnecessary work when
// tracing is disabled.
unaryInterceptor = otgrpc.OpenTracingServerInterceptor(
tracer,
otgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(
func(
parentSpanCtx opentracing.SpanContext,
method string,
req, resp interface{}) bool {
// This anonymous func serves to bind the tracer for
// spanInclusionFuncForServer.
return spanInclusionFuncForServer(
tracer.(*tracing.Tracer), parentSpanCtx, method, req, resp)
})),
)
// TODO(tschottdorf): should set up tracing for stream-based RPCs as
// well. The otgrpc package has no such facility, but there's also this:
//
// https://github.com/grpc-ecosystem/go-grpc-middleware/tree/master/tracing/opentracing
}
// TODO(tschottdorf): when setting up the interceptors below, could make the
// functions a wee bit more performant by hoisting some of the nil checks
// out. Doubt measurements can tell the difference though.
if interceptor != nil {
prevUnaryInterceptor := unaryInterceptor
unaryInterceptor = func(
ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
) (interface{}, error) {
if err := interceptor(info.FullMethod); err != nil {
return nil, err
}
if prevUnaryInterceptor != nil {
return prevUnaryInterceptor(ctx, req, info, handler)
}
return handler(ctx, req)
}
}
if interceptor != nil {
prevStreamInterceptor := streamInterceptor
streamInterceptor = func(
srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler,
) error {
if err := interceptor(info.FullMethod); err != nil {
return err
}
if prevStreamInterceptor != nil {
return prevStreamInterceptor(srv, stream, info, handler)
}
return handler(srv, stream)
}
}
if !ctx.Insecure {
prevUnaryInterceptor := unaryInterceptor
unaryInterceptor = func(
ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
) (interface{}, error) {
if err := requireSuperUser(ctx); err != nil {
return nil, err
}
if prevUnaryInterceptor != nil {
return prevUnaryInterceptor(ctx, req, info, handler)
}
return handler(ctx, req)
}
prevStreamInterceptor := streamInterceptor
streamInterceptor = func(
srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler,
) error {
if err := requireSuperUser(stream.Context()); err != nil {
return err
}
if prevStreamInterceptor != nil {
return prevStreamInterceptor(srv, stream, info, handler)
}
return handler(srv, stream)
}
}
if unaryInterceptor != nil {
opts = append(opts, grpc.UnaryInterceptor(unaryInterceptor))
}
if streamInterceptor != nil {
opts = append(opts, grpc.StreamInterceptor(streamInterceptor))
}
s := grpc.NewServer(opts...)
RegisterHeartbeatServer(s, &HeartbeatService{
clock: ctx.LocalClock,
remoteClockMonitor: ctx.RemoteClocks,
clusterName: ctx.clusterName,
disableClusterNameVerification: ctx.disableClusterNameVerification,
clusterID: &ctx.ClusterID,
nodeID: &ctx.NodeID,
settings: ctx.settings,
testingAllowNamedRPCToAnonymousServer: ctx.TestingAllowNamedRPCToAnonymousServer,
})
return s
}
type heartbeatResult struct {
everSucceeded bool // true if the heartbeat has ever succeeded
err error // heartbeat error, initialized to ErrNotHeartbeated
}
// state is a helper to return the heartbeatState implied by a heartbeatResult.
func (hr heartbeatResult) state() (s heartbeatState) {
switch {
case !hr.everSucceeded && hr.err != nil:
s = heartbeatInitializing
case hr.everSucceeded && hr.err == nil:
s = heartbeatNominal
case hr.everSucceeded && hr.err != nil:
s = heartbeatFailed
}
return s
}
// Connection is a wrapper around grpc.ClientConn. It prevents the underlying
// connection from being used until it has been validated via heartbeat.
type Connection struct {
grpcConn *grpc.ClientConn
dialErr error // error while dialing; if set, connection is unusable
heartbeatResult atomic.Value // result of latest heartbeat
initialHeartbeatDone chan struct{} // closed after first heartbeat
stopper *stop.Stopper
	// remoteNodeID, when non-zero, is the expected node ID of the remote
	// node; it is verified against the remote during heartbeats. 0 means the
	// remote's ID is unknown and goes unchecked. This is constant throughout
	// the lifetime of a Connection object.
remoteNodeID roachpb.NodeID
initOnce sync.Once
}
func newConnectionToNodeID(stopper *stop.Stopper, remoteNodeID roachpb.NodeID) *Connection {
c := &Connection{
initialHeartbeatDone: make(chan struct{}),
stopper: stopper,
remoteNodeID: remoteNodeID,
}
c.heartbeatResult.Store(heartbeatResult{err: ErrNotHeartbeated})
return c
}
// Connect returns the underlying grpc.ClientConn after it has been validated,
// or an error if dialing or validation fails.
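//
// A minimal caller sketch (illustrative only; rpcCtx, addr, and nodeID are
// assumed to exist at the call site):
//
//	conn := rpcCtx.GRPCDialNode(addr, nodeID, DefaultClass)
//	cc, err := conn.Connect(ctx) // blocks until the first heartbeat resolves
//	if err != nil {
//		return err
//	}
//	client := roachpb.NewInternalClient(cc)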
func (c *Connection) Connect(ctx context.Context) (*grpc.ClientConn, error) {
if c.dialErr != nil {
return nil, c.dialErr
}
// Wait for initial heartbeat.
select {
case <-c.initialHeartbeatDone:
case <-c.stopper.ShouldStop():
return nil, errors.Errorf("stopped")
case <-ctx.Done():
return nil, ctx.Err()
}
// If connection is invalid, return latest heartbeat error.
h := c.heartbeatResult.Load().(heartbeatResult)
if !h.everSucceeded {
// If we've never succeeded, h.err will be ErrNotHeartbeated.
return nil, netutil.NewInitialHeartBeatFailedError(h.err)
}
return c.grpcConn, nil
}
// Health returns an error indicating the success or failure of the
// connection's latest heartbeat. Returns ErrNotHeartbeated if the
// first heartbeat has not completed.
func (c *Connection) Health() error {
return c.heartbeatResult.Load().(heartbeatResult).err
}
// Context contains the fields required by the rpc framework.
type Context struct {
*base.Config
AmbientCtx log.AmbientContext
LocalClock *hlc.Clock
breakerClock breakerClock
Stopper *stop.Stopper
RemoteClocks *RemoteClockMonitor
masterCtx context.Context
heartbeatInterval time.Duration
heartbeatTimeout time.Duration
HeartbeatCB func()
rpcCompression bool
localInternalClient roachpb.InternalClient
conns syncmap.Map
stats StatsHandler
ClusterID base.ClusterIDContainer
NodeID base.NodeIDContainer
settings *cluster.Settings
clusterName string
disableClusterNameVerification bool
metrics Metrics
// For unittesting.
BreakerFactory func() *circuit.Breaker
testingDialOpts []grpc.DialOption
testingKnobs ContextTestingKnobs
// For testing. See the comment on the same field in HeartbeatService.
TestingAllowNamedRPCToAnonymousServer bool
}
// connKey is used as a key in the Context.conns map.
// Connections that share a target and nodeID but carry different classes
// always map to distinct connections. Different remote node IDs get
// distinct *Connection objects to ensure that we don't mis-route RPC
// requests in the face of address reuse. Gossip connections and other
// non-Internal users of the Context are free to dial nodes without
// specifying a node ID (see GRPCUnvalidatedDial()); however, later calls to
// Dial with the same target and class but with a node ID will create a new
// underlying connection. The inverse is not true: a connection dialed
// without a node ID will use an existing connection to a matching
// (targetAddr, class) pair.
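//
// For illustration (an assumed scenario, not an API contract): dialing
// ("n1:26257", 5, DefaultClass) registers both {target, 5, DefaultClass}
// and, via the key sharing described above, {target, 0, DefaultClass}, so a
// later dial of the same address and class without a node ID reuses the
// validated connection.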
type connKey struct {
targetAddr string
nodeID roachpb.NodeID
class ConnectionClass
}
// NewContext creates an rpc Context with the supplied values.
func NewContext(
ambient log.AmbientContext,
baseCtx *base.Config,
hlcClock *hlc.Clock,
stopper *stop.Stopper,
st *cluster.Settings,
) *Context {
return NewContextWithTestingKnobs(ambient, baseCtx, hlcClock, stopper, st,
ContextTestingKnobs{})
}
// NewContextWithTestingKnobs creates an rpc Context with the supplied values.
func NewContextWithTestingKnobs(
ambient log.AmbientContext,
baseCtx *base.Config,
hlcClock *hlc.Clock,
stopper *stop.Stopper,
st *cluster.Settings,
knobs ContextTestingKnobs,
) *Context {
if hlcClock == nil {
panic("nil clock is forbidden")
}
ctx := &Context{
AmbientCtx: ambient,
Config: baseCtx,
LocalClock: hlcClock,
breakerClock: breakerClock{
clock: hlcClock,
},
rpcCompression: enableRPCCompression,
settings: st,
clusterName: baseCtx.ClusterName,
disableClusterNameVerification: baseCtx.DisableClusterNameVerification,
testingKnobs: knobs,
}
var cancel context.CancelFunc
ctx.masterCtx, cancel = context.WithCancel(ambient.AnnotateCtx(context.Background()))
ctx.Stopper = stopper
ctx.heartbeatInterval = baseCtx.RPCHeartbeatInterval
ctx.RemoteClocks = newRemoteClockMonitor(
ctx.LocalClock, 10*ctx.heartbeatInterval, baseCtx.HistogramWindowInterval)
ctx.heartbeatTimeout = 2 * ctx.heartbeatInterval
ctx.metrics = makeMetrics()
stopper.RunWorker(ctx.masterCtx, func(context.Context) {
<-stopper.ShouldQuiesce()
cancel()
ctx.conns.Range(func(k, v interface{}) bool {
conn := v.(*Connection)
conn.initOnce.Do(func() {
// Make sure initialization is not in progress when we're removing the
// conn. We need to set the error in case we win the race against the
// real initialization code.
if conn.dialErr == nil {
conn.dialErr = &roachpb.NodeUnavailableError{}
}
})
ctx.removeConn(conn, k.(connKey))
return true
})
})
if knobs.ClusterID != nil {
ctx.ClusterID.Set(ctx.masterCtx, *knobs.ClusterID)
}
return ctx
}
// ClusterName retrieves the configured cluster name.
func (ctx *Context) ClusterName() string {
if ctx == nil {
// This is used in tests.
return "<MISSING RPC CONTEXT>"
}
return ctx.clusterName
}
// GetStatsMap returns a map of network statistics maintained by the
// internal stats handler. The map is from the remote network address
// (in string form) to an rpc.Stats object.
func (ctx *Context) GetStatsMap() *syncmap.Map {
return &ctx.stats.stats
}
// Metrics returns the Context's Metrics struct.
func (ctx *Context) Metrics() *Metrics {
return &ctx.metrics
}
// GetLocalInternalClientForAddr returns the context's internal batch client
// for target, if it exists.
func (ctx *Context) GetLocalInternalClientForAddr(
target string, nodeID roachpb.NodeID,
) roachpb.InternalClient {
if target == ctx.AdvertiseAddr && nodeID == ctx.NodeID.Get() {
return ctx.localInternalClient
}
return nil
}
type internalClientAdapter struct {
roachpb.InternalServer
}
func (a internalClientAdapter) Batch(
ctx context.Context, ba *roachpb.BatchRequest, _ ...grpc.CallOption,
) (*roachpb.BatchResponse, error) {
return a.InternalServer.Batch(ctx, ba)
}
type rangeFeedClientAdapter struct {
ctx context.Context
eventC chan *roachpb.RangeFeedEvent
errC chan error
}
// roachpb.Internal_RangeFeedClient methods.
func (a rangeFeedClientAdapter) Recv() (*roachpb.RangeFeedEvent, error) {
// Prioritize eventC. Both channels are buffered and the only guarantee we
// have is that once an error is sent on errC no other events will be sent
// on eventC again.
select {
case e := <-a.eventC:
return e, nil
case err := <-a.errC:
select {
case e := <-a.eventC:
a.errC <- err
return e, nil
default:
return nil, err
}
}
}
// roachpb.Internal_RangeFeedServer methods.
func (a rangeFeedClientAdapter) Send(e *roachpb.RangeFeedEvent) error {
select {
case a.eventC <- e:
return nil
case <-a.ctx.Done():
return a.ctx.Err()
}
}
// grpc.ClientStream methods.
func (rangeFeedClientAdapter) Header() (metadata.MD, error) { panic("unimplemented") }
func (rangeFeedClientAdapter) Trailer() metadata.MD { panic("unimplemented") }
func (rangeFeedClientAdapter) CloseSend() error { panic("unimplemented") }
// grpc.ServerStream methods.
func (rangeFeedClientAdapter) SetHeader(metadata.MD) error { panic("unimplemented") }
func (rangeFeedClientAdapter) SendHeader(metadata.MD) error { panic("unimplemented") }
func (rangeFeedClientAdapter) SetTrailer(metadata.MD) { panic("unimplemented") }
// grpc.Stream methods.
func (a rangeFeedClientAdapter) Context() context.Context { return a.ctx }
func (rangeFeedClientAdapter) SendMsg(m interface{}) error { panic("unimplemented") }
func (rangeFeedClientAdapter) RecvMsg(m interface{}) error { panic("unimplemented") }
var _ roachpb.Internal_RangeFeedClient = rangeFeedClientAdapter{}
var _ roachpb.Internal_RangeFeedServer = rangeFeedClientAdapter{}
func (a internalClientAdapter) RangeFeed(
ctx context.Context, args *roachpb.RangeFeedRequest, _ ...grpc.CallOption,
) (roachpb.Internal_RangeFeedClient, error) {
ctx, cancel := context.WithCancel(ctx)
rfAdapter := rangeFeedClientAdapter{
ctx: ctx,
eventC: make(chan *roachpb.RangeFeedEvent, 128),
errC: make(chan error, 1),
}
go func() {
defer cancel()
err := a.InternalServer.RangeFeed(args, rfAdapter)
if err == nil {
err = io.EOF
}
rfAdapter.errC <- err
}()
return rfAdapter, nil
}
var _ roachpb.InternalClient = internalClientAdapter{}
// IsLocal returns true if the given InternalClient is local.
func IsLocal(iface roachpb.InternalClient) bool {
_, ok := iface.(internalClientAdapter)
return ok // internalClientAdapter is used for local connections.
}
// SetLocalInternalServer sets the context's local internal batch server.
func (ctx *Context) SetLocalInternalServer(internalServer roachpb.InternalServer) {
ctx.localInternalClient = internalClientAdapter{internalServer}
}
// removeConn removes the given connection from the pool. The supplied connKeys
// must represent *all* the keys under which the connection was shared.
func (ctx *Context) removeConn(conn *Connection, keys ...connKey) {
for _, key := range keys {
ctx.conns.Delete(key)
}
if log.V(1) {
log.Infof(ctx.masterCtx, "closing %+v", keys)
}
if grpcConn := conn.grpcConn; grpcConn != nil {
if err := grpcConn.Close(); err != nil && !grpcutil.IsClosedConnection(err) {
if log.V(1) {
log.Errorf(ctx.masterCtx, "failed to close client connection: %v", err)
}
}
}
}
// GRPCDialOptions returns the minimal `grpc.DialOption`s necessary to connect
// to a server created with `NewServer`.
//
// At the time of writing, this is being used for making net.Pipe-based
// connections, so only those options that affect semantics are included. In
// particular, performance tuning options are omitted. Decompression is
// necessarily included to support compression-enabled servers, and compression
// is included for symmetry. These choices are admittedly subjective.
func (ctx *Context) GRPCDialOptions() ([]grpc.DialOption, error) {
return ctx.grpcDialOptions("", DefaultClass)
}
// grpcDialOptions extends GRPCDialOptions to support a connection class for use
// with TestingKnobs.
func (ctx *Context) grpcDialOptions(
target string, class ConnectionClass,
) ([]grpc.DialOption, error) {
var dialOpts []grpc.DialOption
if ctx.Insecure {
dialOpts = append(dialOpts, grpc.WithInsecure())
} else {
tlsConfig, err := ctx.GetClientTLSConfig()
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
}
// The limiting factor for lowering the max message size is the fact
// that a single large kv can be sent over the network in one message.
// Our maximum kv size is unlimited, so we need this to be very large.
//
// TODO(peter,tamird): need tests before lowering.
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32),
grpc.MaxCallSendMsgSize(math.MaxInt32),
))
// Compression is enabled separately from decompression to allow staged
// rollout.
if ctx.rpcCompression {
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor((snappyCompressor{}).Name())))
}
var unaryInterceptors []grpc.UnaryClientInterceptor
if tracer := ctx.AmbientCtx.Tracer; tracer != nil {
// We use a SpanInclusionFunc to circumvent the interceptor's work when
// tracing is disabled. Otherwise, the interceptor causes an increase in
// the number of packets (even with an empty context!). See #17177.
unaryInterceptors = append(unaryInterceptors,
otgrpc.OpenTracingClientInterceptor(tracer,
otgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient))))
}
if ctx.testingKnobs.UnaryClientInterceptor != nil {
testingUnaryInterceptor := ctx.testingKnobs.UnaryClientInterceptor(target, class)
if testingUnaryInterceptor != nil {
unaryInterceptors = append(unaryInterceptors, testingUnaryInterceptor)
}
}
dialOpts = append(dialOpts, grpc.WithChainUnaryInterceptor(unaryInterceptors...))
if ctx.testingKnobs.StreamClientInterceptor != nil {
testingStreamInterceptor := ctx.testingKnobs.StreamClientInterceptor(target, class)
if testingStreamInterceptor != nil {
dialOpts = append(dialOpts, grpc.WithStreamInterceptor(testingStreamInterceptor))
}
}
return dialOpts, nil
}
// growStackCodec wraps the default grpc/encoding/proto codec to detect
// BatchRequest rpcs and grow the stack prior to Unmarshaling.
type growStackCodec struct {
encoding.Codec
}
// Unmarshal detects BatchRequests and calls growstack.Grow before calling
// through to the underlying codec.
func (c growStackCodec) Unmarshal(data []byte, v interface{}) error {
if _, ok := v.(*roachpb.BatchRequest); ok {
growstack.Grow()
}
return c.Codec.Unmarshal(data, v)
}
// Install the growStackCodec over the default proto codec in order to grow the
// stack for BatchRequest RPCs prior to unmarshaling.
func init() {
protoCodec := encoding.GetCodec(encodingproto.Name)
encoding.RegisterCodec(growStackCodec{Codec: protoCodec})
}
// onlyOnceDialer implements the grpc.WithDialer interface but only
// allows a single connection attempt. If a reconnection is attempted,
// redialChan is closed to signal a higher-level retry loop. This
// ensures that our initial heartbeat (and its version/clusterID
// validation) occurs on every new connection.
type onlyOnceDialer struct {
syncutil.Mutex
dialed bool
closed bool
redialChan chan struct{}
}
func (ood *onlyOnceDialer) dial(ctx context.Context, addr string) (net.Conn, error) {
ood.Lock()
defer ood.Unlock()
if !ood.dialed {
ood.dialed = true
dialer := net.Dialer{
LocalAddr: sourceAddr,
}
return dialer.DialContext(ctx, "tcp", addr)
} else if !ood.closed {
ood.closed = true
close(ood.redialChan)
}
return nil, grpcutil.ErrCannotReuseClientConn
}
type dialerFunc func(context.Context, string) (net.Conn, error)
type artificialLatencyDialer struct {
dialerFunc dialerFunc
latencyMS int
}
func (ald *artificialLatencyDialer) dial(ctx context.Context, addr string) (net.Conn, error) {
conn, err := ald.dialerFunc(ctx, addr)
if err != nil {
return conn, err
}
return delayingConn{
Conn: conn,
latency: time.Duration(ald.latencyMS) * time.Millisecond,
readBuf: new(bytes.Buffer),
}, nil
}
type delayingListener struct {
net.Listener
}
// NewDelayingListener creates a net.Listener that introduces a set delay on its connections.
func NewDelayingListener(l net.Listener) net.Listener {
return delayingListener{Listener: l}
}
func (d delayingListener) Accept() (net.Conn, error) {
c, err := d.Listener.Accept()
if err != nil {
return nil, err
}
return delayingConn{
Conn: c,
		// Start the server's conn with zero latency; the real value is
		// populated as packets are exchanged across the delaying connections.
latency: time.Duration(0) * time.Millisecond,
readBuf: new(bytes.Buffer),
}, nil
}
type delayingConn struct {
net.Conn
latency time.Duration
lastSendEnd time.Time
readBuf *bytes.Buffer
}
func (d delayingConn) Write(b []byte) (n int, err error) {
tNow := timeutil.Now()
if d.lastSendEnd.Before(tNow) {
d.lastSendEnd = tNow
}
hdr := delayingHeader{
Magic: magic,
ReadTime: d.lastSendEnd.Add(d.latency).UnixNano(),
Sz: int32(len(b)),
DelayMS: int32(d.latency / time.Millisecond),
}
if err := binary.Write(d.Conn, binary.BigEndian, hdr); err != nil {
return n, err
}
x, err := d.Conn.Write(b)
n += x
return n, err
}
func (d delayingConn) Read(b []byte) (n int, err error) {
if d.readBuf.Len() == 0 {
var hdr delayingHeader
if err := binary.Read(d.Conn, binary.BigEndian, &hdr); err != nil {
return 0, err
}
		// If we somehow don't get our expected magic, panic: the stream can no
		// longer be parsed reliably.
		if hdr.Magic != magic {
			panic(errors.New("didn't get expected magic bytes header"))
			// TODO (rohany): I can't get this to work. I suspect the problem is
			// that the improperly parsed struct is not written back out in the
			// same binary format it was read in. I tried sending the magic
			// integer over first and saw the same thing.
} else {
d.latency = time.Duration(hdr.DelayMS) * time.Millisecond
defer func() {
time.Sleep(timeutil.Until(timeutil.Unix(0, hdr.ReadTime)))
}()
if _, err := io.CopyN(d.readBuf, d.Conn, int64(hdr.Sz)); err != nil {
return 0, err
}
}
}
return d.readBuf.Read(b)
}
const magic = 0xfeedfeed
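// delayingHeader is the frame header that delayingConn prepends, in
// big-endian binary form, to every Write. Magic guards against stream
// desynchronization, ReadTime is the earliest time (UnixNano) at which the
// reader may surface the payload, Sz is the payload length in bytes, and
// DelayMS carries the sender's artificial latency so the peer can mirror it.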
type delayingHeader struct {
Magic int64
ReadTime int64
Sz int32
DelayMS int32
}
// GRPCDialRaw calls grpc.Dial with options appropriate for the context.
// Unlike GRPCDialNode, it does not start an RPC heartbeat to validate the
// connection. This connection will not be reconnected automatically;
// the returned channel is closed when a reconnection is attempted.
// This method implies a DefaultClass ConnectionClass for the returned
// ClientConn.
func (ctx *Context) GRPCDialRaw(target string) (*grpc.ClientConn, <-chan struct{}, error) {
return ctx.grpcDialRaw(target, 0, DefaultClass)
}
func (ctx *Context) grpcDialRaw(
target string, remoteNodeID roachpb.NodeID, class ConnectionClass,
) (*grpc.ClientConn, <-chan struct{}, error) {
dialOpts, err := ctx.grpcDialOptions(target, class)
if err != nil {
return nil, nil, err
}
// Add a stats handler to measure client network stats.
dialOpts = append(dialOpts, grpc.WithStatsHandler(ctx.stats.newClient(target)))
dialOpts = append(dialOpts, grpc.WithBackoffMaxDelay(maxBackoff))
dialOpts = append(dialOpts, grpc.WithKeepaliveParams(clientKeepalive))
dialOpts = append(dialOpts,
grpc.WithInitialWindowSize(initialWindowSize),
grpc.WithInitialConnWindowSize(initialConnWindowSize))
dialer := onlyOnceDialer{
redialChan: make(chan struct{}),
}
dialerFunc := dialer.dial
if ctx.testingKnobs.ArtificialLatencyMap != nil {
latency := ctx.testingKnobs.ArtificialLatencyMap[target]
log.VEventf(ctx.masterCtx, 1, "Connecting to node %s (%d) with simulated latency %dms", target, remoteNodeID,
latency)
dialer := artificialLatencyDialer{
dialerFunc: dialerFunc,
latencyMS: latency,
}
dialerFunc = dialer.dial
}
dialOpts = append(dialOpts, grpc.WithContextDialer(dialerFunc))
	// Add testingDialOpts after our dialer because one of our tests uses a
	// custom dialer (which disables the only-one-connection behavior, so
	// redialChan will never be closed in that case).
dialOpts = append(dialOpts, ctx.testingDialOpts...)
if log.V(1) {
log.Infof(ctx.masterCtx, "dialing %s", target)
}
conn, err := grpc.DialContext(ctx.masterCtx, target, dialOpts...)
return conn, dialer.redialChan, err
}
// GRPCUnvalidatedDial uses GRPCDialNode and disables validation of the
// node ID between client and server. This function should only be
// used with the gossip client and CLI commands which can talk to any
// node. This method implies a SystemClass.
func (ctx *Context) GRPCUnvalidatedDial(target string) *Connection {
return ctx.grpcDialNodeInternal(target, 0, SystemClass)
}
// GRPCDialNode calls grpc.Dial with options appropriate for the
// context and class (see the comment on ConnectionClass).
//
// The remoteNodeID becomes a constraint on the expected node ID of
// the remote node; this is checked during heartbeats. The caller is
// responsible for ensuring the remote node ID is known prior to using
// this function.
func (ctx *Context) GRPCDialNode(
target string, remoteNodeID roachpb.NodeID, class ConnectionClass,
) *Connection {
if remoteNodeID == 0 && !ctx.TestingAllowNamedRPCToAnonymousServer {
log.Fatalf(context.TODO(), "invalid node ID 0 in GRPCDialNode()")
}
return ctx.grpcDialNodeInternal(target, remoteNodeID, class)
}
func (ctx *Context) grpcDialNodeInternal(
target string, remoteNodeID roachpb.NodeID, class ConnectionClass,
) *Connection {
thisConnKeys := []connKey{{target, remoteNodeID, class}}
value, ok := ctx.conns.Load(thisConnKeys[0])
if !ok {
value, _ = ctx.conns.LoadOrStore(thisConnKeys[0], newConnectionToNodeID(ctx.Stopper, remoteNodeID))
if remoteNodeID != 0 {
// If the first connection established at a target address is
// for a specific node ID, then we want to reuse that connection
			// also for other dials (e.g. for gossip) which don't require a
// specific node ID. (We do this as an optimization to reduce
// the number of TCP connections alive between nodes. This is
// not strictly required for correctness.) This LoadOrStore will
// ensure we're registering the connection we just created for
// future use by these other dials.
//
// We need to be careful to unregister both connKeys when the
// connection breaks. Otherwise, we leak the entry below which
// "simulates" a hard network partition for anyone dialing without
// the nodeID (gossip).
//
// See:
// https://github.com/cockroachdb/cockroach/issues/37200
otherKey := connKey{target, 0, class}
if _, loaded := ctx.conns.LoadOrStore(otherKey, value); !loaded {
thisConnKeys = append(thisConnKeys, otherKey)
}
}
}
conn := value.(*Connection)
conn.initOnce.Do(func() {
// Either we kick off the heartbeat loop (and clean up when it's done),
// or we clean up the connKey entries immediately.
var redialChan <-chan struct{}
conn.grpcConn, redialChan, conn.dialErr = ctx.grpcDialRaw(target, remoteNodeID, class)
if conn.dialErr == nil {
if err := ctx.Stopper.RunTask(
ctx.masterCtx, "rpc.Context: grpc heartbeat", func(masterCtx context.Context) {
ctx.Stopper.RunWorker(masterCtx, func(masterCtx context.Context) {
err := ctx.runHeartbeat(conn, target, redialChan)
if err != nil && !grpcutil.IsClosedConnection(err) {
log.Errorf(masterCtx, "removing connection to %s due to error: %s", target, err)
}
ctx.removeConn(conn, thisConnKeys...)
})
}); err != nil {
conn.dialErr = err
}
}
if conn.dialErr != nil {
ctx.removeConn(conn, thisConnKeys...)
}
})
return conn
}
// NewBreaker creates a new circuit breaker properly configured for RPC
// connections. name is used internally for logging state changes of the
// returned breaker.
func (ctx *Context) NewBreaker(name string) *circuit.Breaker {
if ctx.BreakerFactory != nil {
return ctx.BreakerFactory()
}
return newBreaker(ctx.masterCtx, name, &ctx.breakerClock)
}
// ErrNotHeartbeated is returned by ConnHealth when we have not yet performed
// the first heartbeat.
var ErrNotHeartbeated = errors.New("not yet heartbeated")
func (ctx *Context) runHeartbeat(
conn *Connection, target string, redialChan <-chan struct{},
) (retErr error) {
ctx.metrics.HeartbeatLoopsStarted.Inc(1)
// setInitialHeartbeatDone is idempotent and is critical to notify Connect
// callers of the failure in the case where no heartbeat is ever sent.
state := updateHeartbeatState(&ctx.metrics, heartbeatNotRunning, heartbeatInitializing)
initialHeartbeatDone := false
setInitialHeartbeatDone := func() {
if !initialHeartbeatDone {
close(conn.initialHeartbeatDone)
initialHeartbeatDone = true
}
}
defer func() {
if retErr != nil {
ctx.metrics.HeartbeatLoopsExited.Inc(1)
}
updateHeartbeatState(&ctx.metrics, state, heartbeatNotRunning)
setInitialHeartbeatDone()
}()
maxOffset := ctx.LocalClock.MaxOffset()
maxOffsetNanos := maxOffset.Nanoseconds()
heartbeatClient := NewHeartbeatClient(conn.grpcConn)
var heartbeatTimer timeutil.Timer
defer heartbeatTimer.Stop()
// Give the first iteration a wait-free heartbeat attempt.
heartbeatTimer.Reset(0)
everSucceeded := false
for {
select {
case <-redialChan:
return grpcutil.ErrCannotReuseClientConn
case <-ctx.Stopper.ShouldQuiesce():
return nil
case <-heartbeatTimer.C:
heartbeatTimer.Read = true
}
if err := ctx.Stopper.RunTaskWithErr(ctx.masterCtx, "rpc heartbeat", func(goCtx context.Context) error {
// We re-mint the PingRequest to pick up any asynchronous update to clusterID.
clusterID := ctx.ClusterID.Get()
request := &PingRequest{
Addr: ctx.Addr,
MaxOffsetNanos: maxOffsetNanos,
ClusterID: &clusterID,
NodeID: conn.remoteNodeID,
ServerVersion: ctx.settings.Version.BinaryVersion(),
}
var response *PingResponse
sendTime := ctx.LocalClock.PhysicalTime()
ping := func(goCtx context.Context) (err error) {
// NB: We want the request to fail-fast (the default), otherwise we won't
// be notified of transport failures.
response, err = heartbeatClient.Ping(goCtx, request)
return err
}
var err error
if ctx.heartbeatTimeout > 0 {
err = contextutil.RunWithTimeout(goCtx, "rpc heartbeat", ctx.heartbeatTimeout, ping)
} else {
err = ping(goCtx)
}
if err == nil {
// We verify the cluster name on the initiator side (instead
				// of the heartbeat service side, as done for the cluster ID
// and node ID checks) so that the operator who is starting a
// new node in a cluster and mistakenly joins the wrong
// cluster gets a chance to see the error message on their
// management console.
if !ctx.disableClusterNameVerification && !response.DisableClusterNameVerification {
err = errors.Wrap(
checkClusterName(ctx.clusterName, response.ClusterName),
"cluster name check failed on ping response")
}
}
if err == nil {
err = errors.Wrap(
checkVersion(goCtx, ctx.settings, response.ServerVersion),
"version compatibility check failed on ping response")
}
if err == nil {
everSucceeded = true
receiveTime := ctx.LocalClock.PhysicalTime()
// Only update the clock offset measurement if we actually got a
// successful response from the server.
pingDuration := receiveTime.Sub(sendTime)
maxOffset := ctx.LocalClock.MaxOffset()
if pingDuration > maximumPingDurationMult*maxOffset {
request.Offset.Reset()
} else {
// Offset and error are measured using the remote clock reading
// technique described in
// http://se.inf.tu-dresden.de/pubs/papers/SRDS1994.pdf, page 6.
// However, we assume that drift and min message delay are 0, for
// now.
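					// Concretely (a restatement of the assignments below): with
					// RTT = receiveTime - sendTime, we take
					//   remoteTimeNow = serverTime + RTT/2
					//   offset        = remoteTimeNow - receiveTime
					// and report RTT/2 as the uncertainty.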
request.Offset.MeasuredAt = receiveTime.UnixNano()
request.Offset.Uncertainty = (pingDuration / 2).Nanoseconds()
remoteTimeNow := timeutil.Unix(0, response.ServerTime).Add(pingDuration / 2)
request.Offset.Offset = remoteTimeNow.Sub(receiveTime).Nanoseconds()
}
ctx.RemoteClocks.UpdateOffset(ctx.masterCtx, target, request.Offset, pingDuration)
if cb := ctx.HeartbeatCB; cb != nil {
cb()
}
}
hr := heartbeatResult{
everSucceeded: everSucceeded,
err: err,
}
state = updateHeartbeatState(&ctx.metrics, state, hr.state())
conn.heartbeatResult.Store(hr)
setInitialHeartbeatDone()
return nil
}); err != nil {
return err
}
heartbeatTimer.Reset(ctx.heartbeatInterval)
}
}
| pkg/rpc/context.go | 1 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.9982616305351257,
0.03209305554628372,
0.0001589359890203923,
0.0001710595388431102,
0.16053979098796844
] |
{
"id": 0,
"code_window": [
"\n",
"\tvar unaryInterceptors []grpc.UnaryClientInterceptor\n",
"\n",
"\tif tracer := ctx.AmbientCtx.Tracer; tracer != nil {\n",
"\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 692
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package tree
import (
"bytes"
"fmt"
"reflect"
"strings"
"github.com/cockroachdb/errors"
)
// Visitor defines methods that are called for nodes during an expression or statement walk.
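//
// An illustrative sketch (not a type defined in this package): a Visitor that
// replaces every Placeholder with a fixed datum could look like
//
//	type replacePlaceholders struct{ d Datum }
//
//	func (r replacePlaceholders) VisitPre(expr Expr) (bool, Expr) {
//		if _, ok := expr.(*Placeholder); ok {
//			return false, r.d
//		}
//		return true, expr
//	}
//
//	func (replacePlaceholders) VisitPost(expr Expr) Expr { return expr }
//
// and would be driven via WalkExpr(replacePlaceholders{d: DBoolTrue}, expr).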
type Visitor interface {
// VisitPre is called for each node before recursing into that subtree. Upon return, if recurse
// is false, the visit will not recurse into the subtree (and VisitPost will not be called for
// this node).
//
// The returned Expr replaces the visited expression and can be used for rewriting expressions.
// The function should NOT modify nodes in-place; it should make copies of nodes. The Walk
// infrastructure will automatically make copies of parents as needed.
VisitPre(expr Expr) (recurse bool, newExpr Expr)
	// VisitPost is called for each node after recursing into the subtree.
	//
	// The returned Expr replaces the visited expression and can be used for rewriting expressions.
// The function should NOT modify nodes in-place; it should make and return copies of nodes. The
// Walk infrastructure will automatically make copies of parents as needed.
VisitPost(expr Expr) (newNode Expr)
}
// Walk implements the Expr interface.
func (expr *AndExpr) Walk(v Visitor) Expr {
left, changedL := WalkExpr(v, expr.Left)
right, changedR := WalkExpr(v, expr.Right)
if changedL || changedR {
exprCopy := *expr
exprCopy.Left = left
exprCopy.Right = right
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *AnnotateTypeExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *BinaryExpr) Walk(v Visitor) Expr {
left, changedL := WalkExpr(v, expr.Left)
right, changedR := WalkExpr(v, expr.Right)
if changedL || changedR {
exprCopy := *expr
exprCopy.Left = left
exprCopy.Right = right
return &exprCopy
}
return expr
}
// copyNode makes a copy of this Expr without recursing in any child Exprs.
func (expr *CaseExpr) copyNode() *CaseExpr {
exprCopy := *expr
// Copy the Whens slice.
exprCopy.Whens = make([]*When, len(expr.Whens))
for i, w := range expr.Whens {
wCopy := *w
exprCopy.Whens[i] = &wCopy
}
return &exprCopy
}
// Walk implements the Expr interface.
func (expr *CaseExpr) Walk(v Visitor) Expr {
ret := expr
if expr.Expr != nil {
e, changed := WalkExpr(v, expr.Expr)
if changed {
ret = expr.copyNode()
ret.Expr = e
}
}
for i, w := range expr.Whens {
cond, changedC := WalkExpr(v, w.Cond)
val, changedV := WalkExpr(v, w.Val)
if changedC || changedV {
if ret == expr {
ret = expr.copyNode()
}
ret.Whens[i].Cond = cond
ret.Whens[i].Val = val
}
}
if expr.Else != nil {
e, changed := WalkExpr(v, expr.Else)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Else = e
}
}
return ret
}
// Walk implements the Expr interface.
func (expr *CastExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *CollateExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *ColumnAccessExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *TupleStar) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// copyNode makes a copy of this Expr without recursing in any child Exprs.
func (expr *CoalesceExpr) copyNode() *CoalesceExpr {
exprCopy := *expr
return &exprCopy
}
// Walk implements the Expr interface.
func (expr *CoalesceExpr) Walk(v Visitor) Expr {
ret := expr
exprs, changed := walkExprSlice(v, expr.Exprs)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Exprs = exprs
}
return ret
}
// Walk implements the Expr interface.
func (expr *ComparisonExpr) Walk(v Visitor) Expr {
left, changedL := WalkExpr(v, expr.Left)
right, changedR := WalkExpr(v, expr.Right)
if changedL || changedR {
exprCopy := *expr
exprCopy.Left = left
exprCopy.Right = right
return &exprCopy
}
return expr
}
// copyNode makes a copy of this Expr without recursing in any child Exprs.
func (expr *FuncExpr) copyNode() *FuncExpr {
exprCopy := *expr
exprCopy.Exprs = append(Exprs(nil), exprCopy.Exprs...)
return &exprCopy
}
// copyNode makes a copy of this WindowFrame without recursing.
func (node *WindowFrame) copyNode() *WindowFrame {
nodeCopy := *node
return &nodeCopy
}
func walkWindowFrame(v Visitor, frame *WindowFrame) (*WindowFrame, bool) {
ret := frame
if frame.Bounds.StartBound != nil {
b, changed := walkWindowFrameBound(v, frame.Bounds.StartBound)
if changed {
if ret == frame {
ret = frame.copyNode()
}
ret.Bounds.StartBound = b
}
}
if frame.Bounds.EndBound != nil {
b, changed := walkWindowFrameBound(v, frame.Bounds.EndBound)
if changed {
if ret == frame {
ret = frame.copyNode()
}
ret.Bounds.EndBound = b
}
}
return ret, ret != frame
}
// copyNode makes a copy of this WindowFrameBound without recursing.
func (node *WindowFrameBound) copyNode() *WindowFrameBound {
nodeCopy := *node
return &nodeCopy
}
func walkWindowFrameBound(v Visitor, bound *WindowFrameBound) (*WindowFrameBound, bool) {
ret := bound
if bound.HasOffset() {
e, changed := WalkExpr(v, bound.OffsetExpr)
if changed {
if ret == bound {
ret = bound.copyNode()
}
ret.OffsetExpr = e
}
}
return ret, ret != bound
}
// copyNode makes a copy of this WindowDef without recursing.
func (node *WindowDef) copyNode() *WindowDef {
nodeCopy := *node
return &nodeCopy
}
func walkWindowDef(v Visitor, windowDef *WindowDef) (*WindowDef, bool) {
ret := windowDef
if len(windowDef.Partitions) > 0 {
exprs, changed := walkExprSlice(v, windowDef.Partitions)
if changed {
if ret == windowDef {
ret = windowDef.copyNode()
}
ret.Partitions = exprs
}
}
if len(windowDef.OrderBy) > 0 {
order, changed := walkOrderBy(v, windowDef.OrderBy)
if changed {
if ret == windowDef {
ret = windowDef.copyNode()
}
ret.OrderBy = order
}
}
if windowDef.Frame != nil {
frame, changed := walkWindowFrame(v, windowDef.Frame)
if changed {
if ret == windowDef {
ret = windowDef.copyNode()
}
ret.Frame = frame
}
}
return ret, ret != windowDef
}
// Walk implements the Expr interface.
func (expr *FuncExpr) Walk(v Visitor) Expr {
ret := expr
exprs, changed := walkExprSlice(v, expr.Exprs)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Exprs = exprs
}
if expr.Filter != nil {
e, changed := WalkExpr(v, expr.Filter)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Filter = e
}
}
if expr.OrderBy != nil {
order, changed := walkOrderBy(v, expr.OrderBy)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.OrderBy = order
}
}
return ret
}
// Walk implements the Expr interface.
func (expr *IfExpr) Walk(v Visitor) Expr {
c, changedC := WalkExpr(v, expr.Cond)
t, changedT := WalkExpr(v, expr.True)
e, changedE := WalkExpr(v, expr.Else)
if changedC || changedT || changedE {
exprCopy := *expr
exprCopy.Cond = c
exprCopy.True = t
exprCopy.Else = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *IfErrExpr) Walk(v Visitor) Expr {
c, changedC := WalkExpr(v, expr.Cond)
t := expr.ErrCode
changedEC := false
if t != nil {
t, changedEC = WalkExpr(v, expr.ErrCode)
}
e := expr.Else
changedE := false
if e != nil {
e, changedE = WalkExpr(v, expr.Else)
}
if changedC || changedEC || changedE {
exprCopy := *expr
exprCopy.Cond = c
exprCopy.ErrCode = t
exprCopy.Else = e
return &exprCopy
}
return expr
}
// copyNode makes a copy of this Expr without recursing in any child Exprs.
func (expr *IndirectionExpr) copyNode() *IndirectionExpr {
exprCopy := *expr
exprCopy.Indirection = append(ArraySubscripts(nil), exprCopy.Indirection...)
for i, t := range exprCopy.Indirection {
subscriptCopy := *t
exprCopy.Indirection[i] = &subscriptCopy
}
return &exprCopy
}
// Walk implements the Expr interface.
func (expr *IndirectionExpr) Walk(v Visitor) Expr {
ret := expr
e, changed := WalkExpr(v, expr.Expr)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Expr = e
}
for i, t := range expr.Indirection {
if t.Begin != nil {
e, changed := WalkExpr(v, t.Begin)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Indirection[i].Begin = e
}
}
if t.End != nil {
e, changed := WalkExpr(v, t.End)
if changed {
if ret == expr {
ret = expr.copyNode()
}
ret.Indirection[i].End = e
}
}
}
return ret
}
// Walk implements the Expr interface.
func (expr *IsOfTypeExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *NotExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *NullIfExpr) Walk(v Visitor) Expr {
e1, changed1 := WalkExpr(v, expr.Expr1)
e2, changed2 := WalkExpr(v, expr.Expr2)
if changed1 || changed2 {
exprCopy := *expr
exprCopy.Expr1 = e1
exprCopy.Expr2 = e2
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *OrExpr) Walk(v Visitor) Expr {
left, changedL := WalkExpr(v, expr.Left)
right, changedR := WalkExpr(v, expr.Right)
if changedL || changedR {
exprCopy := *expr
exprCopy.Left = left
exprCopy.Right = right
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *ParenExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *RangeCond) Walk(v Visitor) Expr {
l, changedL := WalkExpr(v, expr.Left)
f, changedF := WalkExpr(v, expr.From)
t, changedT := WalkExpr(v, expr.To)
if changedL || changedF || changedT {
exprCopy := *expr
exprCopy.Left = l
exprCopy.From = f
exprCopy.To = t
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *Subquery) Walk(v Visitor) Expr {
sel, changed := walkStmt(v, expr.Select)
if changed {
exprCopy := *expr
exprCopy.Select = sel.(SelectStatement)
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *UnaryExpr) Walk(v Visitor) Expr {
e, changed := WalkExpr(v, expr.Expr)
if changed {
exprCopy := *expr
exprCopy.Expr = e
return &exprCopy
}
return expr
}
func walkExprSlice(v Visitor, slice []Expr) ([]Expr, bool) {
copied := false
for i := range slice {
e, changed := WalkExpr(v, slice[i])
if changed {
if !copied {
slice = append([]Expr(nil), slice...)
copied = true
}
slice[i] = e
}
}
return slice, copied
}
func walkKVOptions(v Visitor, opts KVOptions) (KVOptions, bool) {
copied := false
for i := range opts {
if opts[i].Value == nil {
continue
}
e, changed := WalkExpr(v, opts[i].Value)
if changed {
if !copied {
opts = append(KVOptions(nil), opts...)
copied = true
}
opts[i].Value = e
}
}
return opts, copied
}
// Walk implements the Expr interface.
func (expr *Tuple) Walk(v Visitor) Expr {
exprs, changed := walkExprSlice(v, expr.Exprs)
if changed {
exprCopy := *expr
exprCopy.Exprs = exprs
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *Array) Walk(v Visitor) Expr {
if exprs, changed := walkExprSlice(v, expr.Exprs); changed {
exprCopy := *expr
exprCopy.Exprs = exprs
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr *ArrayFlatten) Walk(v Visitor) Expr {
if sq, changed := WalkExpr(v, expr.Subquery); changed {
exprCopy := *expr
exprCopy.Subquery = sq
return &exprCopy
}
return expr
}
// Walk implements the Expr interface.
func (expr UnqualifiedStar) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *UnresolvedName) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *AllColumnsSelector) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *ColumnItem) Walk(_ Visitor) Expr {
// TODO(knz): When ARRAY is supported, this must be extended
// to recurse into the index expressions of the ColumnItems' Selector.
return expr
}
// Walk implements the Expr interface.
func (expr DefaultVal) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr PartitionMaxVal) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr PartitionMinVal) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *NumVal) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *StrVal) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *Placeholder) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DBitArray) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DBool) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DBytes) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DDate) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DTime) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DTimeTZ) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DFloat) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DDecimal) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DInt) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DInterval) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DJSON) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DUuid) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DIPAddr) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr dNull) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DString) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DCollatedString) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DTimestamp) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DTimestampTZ) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DTuple) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DArray) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DOid) Walk(_ Visitor) Expr { return expr }
// Walk implements the Expr interface.
func (expr *DOidWrapper) Walk(_ Visitor) Expr { return expr }
// WalkExpr traverses the nodes in an expression.
//
// NOTE: Do not count on the walkStmt/WalkExpr machinery to visit all
// expressions contained in a query. Only a subset of all expressions is
// found by walkStmt and subsequently traversed. See the comment below on
// walkStmt for details.
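//
// Illustrative call shape (not a declaration from this file):
//
//	newExpr, changed := WalkExpr(v, e)
//
// The input e is never mutated in place; newExpr is a possibly-copied
// expression and changed reports whether any rewrite occurred.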
func WalkExpr(v Visitor, expr Expr) (newExpr Expr, changed bool) {
recurse, newExpr := v.VisitPre(expr)
if recurse {
newExpr = newExpr.Walk(v)
newExpr = v.VisitPost(newExpr)
}
// We cannot use == because some Expr implementations are not comparable (e.g. DTuple)
return newExpr, (reflect.ValueOf(expr) != reflect.ValueOf(newExpr))
}
// WalkExprConst is a variant of WalkExpr for visitors that do not modify the expression.
func WalkExprConst(v Visitor, expr Expr) {
WalkExpr(v, expr)
// TODO(radu): we should verify that WalkExpr returns changed == false. Unfortunately that
// is not the case today because walking through non-pointer implementations of Expr (like
// DBool, DTuple) causes new nodes to be created. We should make all Expr implementations be
// pointers (which will also remove the need for using reflect.ValueOf above).
}
// walkableStmt is implemented by statements that can appear inside an expression (selects) or
// we want to start a walk from (using walkStmt).
type walkableStmt interface {
Statement
walkStmt(Visitor) Statement
}
func walkReturningClause(v Visitor, clause ReturningClause) (ReturningClause, bool) {
switch t := clause.(type) {
case *ReturningExprs:
ret := t
for i, expr := range *t {
e, changed := WalkExpr(v, expr.Expr)
if changed {
if ret == t {
ret = t.copyNode()
}
(*ret)[i].Expr = e
}
}
return ret, (ret != t)
case *ReturningNothing, *NoReturningClause:
return t, false
default:
panic(errors.AssertionFailedf("unexpected ReturningClause type: %T", t))
}
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Backup) copyNode() *Backup {
stmtCopy := *stmt
stmtCopy.IncrementalFrom = append(Exprs(nil), stmt.IncrementalFrom...)
stmtCopy.Options = append(KVOptions(nil), stmt.Options...)
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Backup) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.AsOf.Expr != nil {
e, changed := WalkExpr(v, stmt.AsOf.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.AsOf.Expr = e
}
}
for i, expr := range stmt.To {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.To[i] = e
}
}
for i, expr := range stmt.IncrementalFrom {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.IncrementalFrom[i] = e
}
}
{
opts, changed := walkKVOptions(v, stmt.Options)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Options = opts
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Delete) copyNode() *Delete {
stmtCopy := *stmt
if stmt.Where != nil {
wCopy := *stmt.Where
stmtCopy.Where = &wCopy
}
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Delete) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.Where != nil {
e, changed := WalkExpr(v, stmt.Where.Expr)
if changed {
ret = stmt.copyNode()
ret.Where.Expr = e
}
}
returning, changed := walkReturningClause(v, stmt.Returning)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Returning = returning
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Explain) copyNode() *Explain {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Explain) walkStmt(v Visitor) Statement {
s, changed := walkStmt(v, stmt.Statement)
if changed {
stmt = stmt.copyNode()
stmt.Statement = s
}
return stmt
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *ExplainAnalyzeDebug) copyNode() *ExplainAnalyzeDebug {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *ExplainAnalyzeDebug) walkStmt(v Visitor) Statement {
s, changed := walkStmt(v, stmt.Statement)
if changed {
stmt = stmt.copyNode()
stmt.Statement = s
}
return stmt
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Insert) copyNode() *Insert {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Insert) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.Rows != nil {
rows, changed := walkStmt(v, stmt.Rows)
if changed {
ret = stmt.copyNode()
ret.Rows = rows.(*Select)
}
}
returning, changed := walkReturningClause(v, stmt.Returning)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Returning = returning
}
// TODO(dan): Walk OnConflict once the ON CONFLICT DO UPDATE form of upsert is
// implemented.
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *CreateTable) copyNode() *CreateTable {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *CreateTable) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.AsSource != nil {
rows, changed := walkStmt(v, stmt.AsSource)
if changed {
ret = stmt.copyNode()
ret.AsSource = rows.(*Select)
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *CancelQueries) copyNode() *CancelQueries {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *CancelQueries) walkStmt(v Visitor) Statement {
sel, changed := walkStmt(v, stmt.Queries)
if changed {
stmt = stmt.copyNode()
stmt.Queries = sel.(*Select)
}
return stmt
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *CancelSessions) copyNode() *CancelSessions {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *CancelSessions) walkStmt(v Visitor) Statement {
sel, changed := walkStmt(v, stmt.Sessions)
if changed {
stmt = stmt.copyNode()
stmt.Sessions = sel.(*Select)
}
return stmt
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *ControlJobs) copyNode() *ControlJobs {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *ControlJobs) walkStmt(v Visitor) Statement {
sel, changed := walkStmt(v, stmt.Jobs)
if changed {
stmt = stmt.copyNode()
stmt.Jobs = sel.(*Select)
}
return stmt
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Import) copyNode() *Import {
stmtCopy := *stmt
stmtCopy.Files = append(Exprs(nil), stmt.Files...)
stmtCopy.Options = append(KVOptions(nil), stmt.Options...)
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Import) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.CreateFile != nil {
e, changed := WalkExpr(v, stmt.CreateFile)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.CreateFile = e
}
}
for i, expr := range stmt.Files {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Files[i] = e
}
}
{
opts, changed := walkKVOptions(v, stmt.Options)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Options = opts
}
}
return ret
}
// walkStmt is part of the walkableStmt interface.
func (stmt *ParenSelect) walkStmt(v Visitor) Statement {
sel, changed := walkStmt(v, stmt.Select)
if changed {
return &ParenSelect{sel.(*Select)}
}
return stmt
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Restore) copyNode() *Restore {
stmtCopy := *stmt
stmtCopy.From = append([]PartitionedBackup(nil), stmt.From...)
stmtCopy.Options = append(KVOptions(nil), stmt.Options...)
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Restore) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.AsOf.Expr != nil {
e, changed := WalkExpr(v, stmt.AsOf.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.AsOf.Expr = e
}
}
for i, backup := range stmt.From {
for j, expr := range backup {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.From[i][j] = e
}
}
}
{
opts, changed := walkKVOptions(v, stmt.Options)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Options = opts
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *ReturningExprs) copyNode() *ReturningExprs {
stmtCopy := append(ReturningExprs(nil), *stmt...)
return &stmtCopy
}
func walkOrderBy(v Visitor, order OrderBy) (OrderBy, bool) {
copied := false
for i := range order {
if order[i].OrderType != OrderByColumn {
continue
}
e, changed := WalkExpr(v, order[i].Expr)
if changed {
if !copied {
order = append(OrderBy(nil), order...)
copied = true
}
orderByCopy := *order[i]
orderByCopy.Expr = e
order[i] = &orderByCopy
}
}
return order, copied
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Select) copyNode() *Select {
stmtCopy := *stmt
if stmt.Limit != nil {
lCopy := *stmt.Limit
stmtCopy.Limit = &lCopy
}
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Select) walkStmt(v Visitor) Statement {
ret := stmt
sel, changed := walkStmt(v, stmt.Select)
if changed {
ret = stmt.copyNode()
ret.Select = sel.(SelectStatement)
}
order, changed := walkOrderBy(v, stmt.OrderBy)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.OrderBy = order
}
if stmt.Limit != nil {
if stmt.Limit.Offset != nil {
e, changed := WalkExpr(v, stmt.Limit.Offset)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Limit.Offset = e
}
}
if stmt.Limit.Count != nil {
e, changed := WalkExpr(v, stmt.Limit.Count)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Limit.Count = e
}
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *SelectClause) copyNode() *SelectClause {
stmtCopy := *stmt
stmtCopy.Exprs = append(SelectExprs(nil), stmt.Exprs...)
stmtCopy.From = From{
Tables: append(TableExprs(nil), stmt.From.Tables...),
AsOf: stmt.From.AsOf,
}
if stmt.Where != nil {
wCopy := *stmt.Where
stmtCopy.Where = &wCopy
}
stmtCopy.GroupBy = append(GroupBy(nil), stmt.GroupBy...)
if stmt.Having != nil {
hCopy := *stmt.Having
stmtCopy.Having = &hCopy
}
stmtCopy.Window = append(Window(nil), stmt.Window...)
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *SelectClause) walkStmt(v Visitor) Statement {
ret := stmt
for i, expr := range stmt.Exprs {
e, changed := WalkExpr(v, expr.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Exprs[i].Expr = e
}
}
if stmt.From.AsOf.Expr != nil {
e, changed := WalkExpr(v, stmt.From.AsOf.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.From.AsOf.Expr = e
}
}
if stmt.Where != nil {
e, changed := WalkExpr(v, stmt.Where.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Where.Expr = e
}
}
for i, expr := range stmt.GroupBy {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.GroupBy[i] = e
}
}
if stmt.Having != nil {
e, changed := WalkExpr(v, stmt.Having.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Having.Expr = e
}
}
for i := range stmt.Window {
w, changed := walkWindowDef(v, stmt.Window[i])
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Window[i] = w
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *SetVar) copyNode() *SetVar {
stmtCopy := *stmt
stmtCopy.Values = append(Exprs(nil), stmt.Values...)
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *SetVar) walkStmt(v Visitor) Statement {
ret := stmt
for i, expr := range stmt.Values {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Values[i] = e
}
}
return ret
}
// walkStmt is part of the walkableStmt interface.
func (stmt *SetZoneConfig) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.YAMLConfig != nil {
e, changed := WalkExpr(v, stmt.YAMLConfig)
if changed {
newStmt := *stmt
ret = &newStmt
ret.YAMLConfig = e
}
}
if stmt.Options != nil {
newOpts, changed := walkKVOptions(v, stmt.Options)
if changed {
if ret == stmt {
newStmt := *stmt
ret = &newStmt
}
ret.Options = newOpts
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *SetTracing) copyNode() *SetTracing {
stmtCopy := *stmt
stmtCopy.Values = append(Exprs(nil), stmt.Values...)
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *SetTracing) walkStmt(v Visitor) Statement {
ret := stmt
for i, expr := range stmt.Values {
e, changed := WalkExpr(v, expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Values[i] = e
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *SetClusterSetting) copyNode() *SetClusterSetting {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *SetClusterSetting) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.Value != nil {
e, changed := WalkExpr(v, stmt.Value)
if changed {
ret = stmt.copyNode()
ret.Value = e
}
}
return ret
}
// copyNode makes a copy of this Statement without recursing in any child Statements.
func (stmt *Update) copyNode() *Update {
stmtCopy := *stmt
stmtCopy.Exprs = make(UpdateExprs, len(stmt.Exprs))
for i, e := range stmt.Exprs {
eCopy := *e
stmtCopy.Exprs[i] = &eCopy
}
if stmt.Where != nil {
wCopy := *stmt.Where
stmtCopy.Where = &wCopy
}
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *Update) walkStmt(v Visitor) Statement {
ret := stmt
for i, expr := range stmt.Exprs {
e, changed := WalkExpr(v, expr.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Exprs[i].Expr = e
}
}
if stmt.Where != nil {
e, changed := WalkExpr(v, stmt.Where.Expr)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Where.Expr = e
}
}
returning, changed := walkReturningClause(v, stmt.Returning)
if changed {
if ret == stmt {
ret = stmt.copyNode()
}
ret.Returning = returning
}
return ret
}
// walkStmt is part of the walkableStmt interface.
func (stmt *ValuesClause) walkStmt(v Visitor) Statement {
ret := stmt
for i, tuple := range stmt.Rows {
exprs, changed := walkExprSlice(v, tuple)
if changed {
if ret == stmt {
ret = &ValuesClause{append([]Exprs(nil), stmt.Rows...)}
}
ret.Rows[i] = exprs
}
}
return ret
}
// copyNode makes a copy of this Statement.
func (stmt *BeginTransaction) copyNode() *BeginTransaction {
stmtCopy := *stmt
return &stmtCopy
}
// walkStmt is part of the walkableStmt interface.
func (stmt *BeginTransaction) walkStmt(v Visitor) Statement {
ret := stmt
if stmt.Modes.AsOf.Expr != nil {
e, changed := WalkExpr(v, stmt.Modes.AsOf.Expr)
if changed {
ret = stmt.copyNode()
ret.Modes.AsOf.Expr = e
}
}
return ret
}
var _ walkableStmt = &CreateTable{}
var _ walkableStmt = &Backup{}
var _ walkableStmt = &Delete{}
var _ walkableStmt = &Explain{}
var _ walkableStmt = &Insert{}
var _ walkableStmt = &Import{}
var _ walkableStmt = &ParenSelect{}
var _ walkableStmt = &Restore{}
var _ walkableStmt = &Select{}
var _ walkableStmt = &SelectClause{}
var _ walkableStmt = &SetClusterSetting{}
var _ walkableStmt = &SetVar{}
var _ walkableStmt = &Update{}
var _ walkableStmt = &ValuesClause{}
var _ walkableStmt = &CancelQueries{}
var _ walkableStmt = &CancelSessions{}
var _ walkableStmt = &ControlJobs{}
var _ walkableStmt = &BeginTransaction{}
// walkStmt walks the entire parsed stmt calling WalkExpr on each
// expression, and replacing each expression with the one returned
// by WalkExpr.
//
// NOTE: Beware that walkStmt does not necessarily traverse all parts of a
// statement by itself. For example, it will not walk into Subquery nodes
// within a FROM clause or into a JoinCond. Walk's logic is pretty
// interdependent with the logic for constructing a query plan.
func walkStmt(v Visitor, stmt Statement) (newStmt Statement, changed bool) {
walkable, ok := stmt.(walkableStmt)
if !ok {
return stmt, false
}
newStmt = walkable.walkStmt(v)
return newStmt, (stmt != newStmt)
}
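// simpleVisitStmt is an illustrative sketch, not part of the original file:
// it shows how walkStmt composes with the simpleVisitor defined below, as the
// statement-level analogue of SimpleVisit.
func simpleVisitStmt(stmt Statement, preFn SimpleVisitFn) (Statement, error) {
	v := simpleVisitor{fn: preFn}
	newStmt, _ := walkStmt(&v, stmt)
	if v.err != nil {
		return nil, v.err
	}
	return newStmt, nil
}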
type simpleVisitor struct {
fn SimpleVisitFn
err error
}
var _ Visitor = &simpleVisitor{}
func (v *simpleVisitor) VisitPre(expr Expr) (recurse bool, newExpr Expr) {
if v.err != nil {
return false, expr
}
recurse, newExpr, v.err = v.fn(expr)
if v.err != nil {
return false, expr
}
return recurse, newExpr
}
func (*simpleVisitor) VisitPost(expr Expr) Expr { return expr }
// SimpleVisitFn is a function that is run for every node in the VisitPre stage;
// see SimpleVisit.
type SimpleVisitFn func(expr Expr) (recurse bool, newExpr Expr, err error)
// SimpleVisit is a convenience wrapper for visitors that only have VisitPre
// code and don't return any results except an error. The given function is
// called in VisitPre for every node. The visitor stops as soon as an error is
// returned.
func SimpleVisit(expr Expr, preFn SimpleVisitFn) (Expr, error) {
v := simpleVisitor{fn: preFn}
newExpr, _ := WalkExpr(&v, expr)
if v.err != nil {
return nil, v.err
}
return newExpr, nil
}
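// countExprNodes is a hypothetical usage sketch (not in the original file)
// showing SimpleVisit driving a read-only traversal: the callback leaves every
// expression unchanged and only bumps a counter.
func countExprNodes(expr Expr) (int, error) {
	count := 0
	_, err := SimpleVisit(expr, func(e Expr) (recurse bool, newExpr Expr, err error) {
		count++ // counted once per node reached in VisitPre
		return true, e, nil
	})
	return count, err
}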
type debugVisitor struct {
buf bytes.Buffer
level int
}
var _ Visitor = &debugVisitor{}
func (v *debugVisitor) VisitPre(expr Expr) (recurse bool, newExpr Expr) {
v.level++
fmt.Fprintf(&v.buf, "%*s", 2*v.level, " ")
str := fmt.Sprintf("%#v\n", expr)
// Remove "parser." to make the string more compact.
str = strings.Replace(str, "parser.", "", -1)
v.buf.WriteString(str)
return true, expr
}
func (v *debugVisitor) VisitPost(expr Expr) Expr {
v.level--
return expr
}
// ExprDebugString generates a multi-line debug string with one node per line in
// Go format.
func ExprDebugString(expr Expr) string {
v := debugVisitor{}
WalkExprConst(&v, expr)
return v.buf.String()
}
// StmtDebugString generates multi-line debug strings in Go format for the
// expressions that are part of the given statement.
func StmtDebugString(stmt Statement) string {
v := debugVisitor{}
walkStmt(&v, stmt)
return v.buf.String()
}
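// debugDiff is an illustrative helper sketch (not in the original file): it
// pairs two ExprDebugString dumps so the effect of a rewrite on the tree
// shape can be compared side by side.
func debugDiff(before, after Expr) string {
	return fmt.Sprintf("before:\n%safter:\n%s",
		ExprDebugString(before), ExprDebugString(after))
}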
// Silence any warnings if these functions are not used.
var _ = ExprDebugString
var _ = StmtDebugString
| pkg/sql/sem/tree/walk.go | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.0005349447019398212,
0.00017476920038461685,
0.00015531893586739898,
0.0001692816731519997,
0.000042454470531083643
] |
{
"id": 0,
"code_window": [
"\n",
"\tvar unaryInterceptors []grpc.UnaryClientInterceptor\n",
"\n",
"\tif tracer := ctx.AmbientCtx.Tracer; tracer != nil {\n",
"\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 692
} | # LogicTest: local
statement ok
CREATE TABLE t (i INT)
statement ok
INSERT INTO t VALUES (2)
# Verify strings can be parsed as intervals.
query I
SELECT * FROM t AS OF SYSTEM TIME '-1us'
----
2
# Verify a forced interval type works.
query I
SELECT * FROM t AS OF SYSTEM TIME INTERVAL '-1us'
----
2
# Verify that we can use computed expressions.
query I
SELECT * FROM t AS OF SYSTEM TIME -( ('1000' || 'us')::INTERVAL )
----
2
statement error pq: AS OF SYSTEM TIME: only constant expressions or experimental_follower_read_timestamp are allowed
SELECT * FROM t AS OF SYSTEM TIME cluster_logical_timestamp()
statement error pq: subqueries are not allowed in AS OF SYSTEM TIME
SELECT * FROM t AS OF SYSTEM TIME (SELECT '-1h'::INTERVAL)
statement error pq: relation "t" does not exist
SELECT * FROM t AS OF SYSTEM TIME '-1h'
statement error pq: experimental_follower_read_timestamp\(\): experimental_follower_read_timestamp is only available in ccl distribution
SELECT * FROM t AS OF SYSTEM TIME experimental_follower_read_timestamp()
statement error pq: unknown signature: experimental_follower_read_timestamp\(string\) \(desired <timestamptz>\)
SELECT * FROM t AS OF SYSTEM TIME experimental_follower_read_timestamp('boom')
statement error pq: AS OF SYSTEM TIME: only constant expressions or experimental_follower_read_timestamp are allowed
SELECT * FROM t AS OF SYSTEM TIME now()
statement error cannot specify timestamp in the future
SELECT * FROM t AS OF SYSTEM TIME '10s'
# Verify that the TxnTimestamp used to generate now() and current_timestamp() is
# set to the historical timestamp.
query T
SELECT * FROM (SELECT now()) AS OF SYSTEM TIME '2018-01-01'
----
2018-01-01 00:00:00 +0000 UTC
# Verify that intervals indistinguishable from zero cause an error.
statement error pq: AS OF SYSTEM TIME: interval value '0.1us' too small, absolute value must be >= 1µs
SELECT * FROM t AS OF SYSTEM TIME '0.1us'
statement error pq: AS OF SYSTEM TIME: interval value '0-0' too small, absolute value must be >= 1µs
SELECT * FROM t AS OF SYSTEM TIME '0-0'
statement error pq: AS OF SYSTEM TIME: interval value '-0.1us' too small, absolute value must be >= 1µs
SELECT * FROM t AS OF SYSTEM TIME '-0.1us'
statement error pq: AS OF SYSTEM TIME: zero timestamp is invalid
SELECT * FROM t AS OF SYSTEM TIME '0'
# Verify we can explain a statement that has AS OF.
statement ok
EXPLAIN SELECT * FROM t AS OF SYSTEM TIME '-1us'
| pkg/sql/logictest/testdata/logic_test/as_of | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.0003267823485657573,
0.00019035419973079115,
0.0001642076822463423,
0.00016939268971327692,
0.00005216172576183453
] |
{
"id": 0,
"code_window": [
"\n",
"\tvar unaryInterceptors []grpc.UnaryClientInterceptor\n",
"\n",
"\tif tracer := ctx.AmbientCtx.Tracer; tracer != nil {\n",
"\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep"
],
"after_edit": [],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 692
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
import * as protos from "src/js/protos";
import { FixLong } from "src/util/fixLong";
import _ from "lodash";
type TableDetailsResponse = protos.cockroach.server.serverpb.TableDetailsResponse;
type TableStatsResponse = protos.cockroach.server.serverpb.TableStatsResponse;
// TableInfo is a supporting data structure which combines data about a single
// table that was obtained from multiple backend sources.
export class TableInfo {
public name: string;
public id: number;
public numColumns: number;
public numIndices: number;
public physicalSize: number;
public mvccSize: protos.cockroach.storage.enginepb.IMVCCStats;
public rangeCount: number;
public createStatement: string;
public grants: protos.cockroach.server.serverpb.TableDetailsResponse.IGrant[];
constructor(name: string, details: TableDetailsResponse, stats: TableStatsResponse) {
this.name = name;
this.id = details && details.descriptor_id.toNumber();
this.numColumns = details && details.columns.length;
this.numIndices = details && _.uniqBy(details.indexes, idx => idx.name).length;
this.rangeCount = stats && stats.range_count && stats.range_count.toNumber();
this.createStatement = details && details.create_table_statement;
this.grants = details && details.grants;
if (stats) {
this.mvccSize = stats.stats;
this.physicalSize = FixLong(stats.approximate_disk_bytes).toNumber();
}
}
}
| pkg/ui/src/views/databases/data/tableInfo.tsx | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.00017889196169562638,
0.0001740854058880359,
0.00016878306632861495,
0.00017465186829213053,
0.000003265304940214264
] |
{
"id": 1,
"code_window": [
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient))))\n",
"\t}\n",
"\tif ctx.testingKnobs.UnaryClientInterceptor != nil {\n",
"\t\ttestingUnaryInterceptor := ctx.testingKnobs.UnaryClientInterceptor(target, class)\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient)),\n",
"\t\t\t\t// We use a decorator to set the \"node\" tag. All other spans get the\n",
"\t\t\t\t// node tag from context log tags.\n",
"\t\t\t\t//\n",
"\t\t\t\t// Unfortunately we cannot use the corresponding interceptor on the\n",
"\t\t\t\t// server-side of gRPC to set this tag on server spans because that\n",
"\t\t\t\t// interceptor runs too late - after a traced RPC's recording had\n",
"\t\t\t\t// already been collected. So, on the server-side, the equivalent code\n",
"\t\t\t\t// is in setupSpanForIncomingRPC().\n",
"\t\t\t\totgrpc.SpanDecorator(func(span opentracing.Span, _ string, _, _ interface{}, _ error) {\n",
"\t\t\t\t\tspan.SetTag(\"node\", ctx.NodeID.String())\n",
"\t\t\t\t})))\n"
],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 697
} | // Copyright 2014 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package server
import (
"context"
"fmt"
"net"
"sort"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/build"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/status"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/growstack"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/logtags"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
)
const (
// gossipStatusInterval is the interval for logging gossip status.
gossipStatusInterval = 1 * time.Minute
// FirstNodeID is the node ID of the first node in a new cluster.
FirstNodeID = 1
graphiteIntervalKey = "external.graphite.interval"
maxGraphiteInterval = 15 * time.Minute
)
// Metric names.
var (
metaExecLatency = metric.Metadata{
Name: "exec.latency",
Help: "Latency of batch KV requests executed on this node",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
metaExecSuccess = metric.Metadata{
Name: "exec.success",
Help: "Number of batch KV requests executed successfully on this node",
Measurement: "Batch KV Requests",
Unit: metric.Unit_COUNT,
}
metaExecError = metric.Metadata{
Name: "exec.error",
Help: "Number of batch KV requests that failed to execute on this node",
Measurement: "Batch KV Requests",
Unit: metric.Unit_COUNT,
}
metaDiskStalls = metric.Metadata{
Name: "engine.stalls",
Help: "Number of disk stalls detected on this node",
Measurement: "Disk stalls detected",
Unit: metric.Unit_COUNT,
}
)
// Cluster settings.
var (
// graphiteEndpoint is host:port, if any, of Graphite metrics server.
graphiteEndpoint = settings.RegisterPublicStringSetting(
"external.graphite.endpoint",
"if nonempty, push server metrics to the Graphite or Carbon server at the specified host:port",
"",
)
// graphiteInterval is how often metrics are pushed to Graphite, if enabled.
graphiteInterval = settings.RegisterPublicNonNegativeDurationSettingWithMaximum(
graphiteIntervalKey,
"the interval at which metrics are pushed to Graphite (if enabled)",
10*time.Second,
maxGraphiteInterval,
)
)
type nodeMetrics struct {
Latency *metric.Histogram
Success *metric.Counter
Err *metric.Counter
DiskStalls *metric.Counter
}
func makeNodeMetrics(reg *metric.Registry, histogramWindow time.Duration) nodeMetrics {
nm := nodeMetrics{
Latency: metric.NewLatency(metaExecLatency, histogramWindow),
Success: metric.NewCounter(metaExecSuccess),
Err: metric.NewCounter(metaExecError),
DiskStalls: metric.NewCounter(metaDiskStalls),
}
reg.AddMetricStruct(nm)
return nm
}
// callComplete records very high-level metrics about the number of completed
// calls and their latency. Currently, this only records statistics at the batch
// level; stats on specific lower-level kv operations are not recorded.
func (nm nodeMetrics) callComplete(d time.Duration, pErr *roachpb.Error) {
if pErr != nil && pErr.TransactionRestart == roachpb.TransactionRestart_NONE {
nm.Err.Inc(1)
} else {
nm.Success.Inc(1)
}
nm.Latency.RecordValue(d.Nanoseconds())
}
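// timeCall is a hypothetical convenience sketch (not in the original file)
// showing the intended pairing of a timer with callComplete; batchInternal
// below follows the same pattern inline.
func (nm nodeMetrics) timeCall(fn func() *roachpb.Error) {
	tStart := timeutil.Now()
	pErr := fn()
	nm.callComplete(timeutil.Since(tStart), pErr)
}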
// A Node manages a map of stores (by store ID) for which it serves
// traffic. A node is the top-level data structure. There is one node
// instance per process. A node accepts incoming RPCs and services
// them by directing the commands contained within RPCs to local
// stores, which in turn direct the commands to specific ranges. Each
// node has access to the global, monolithic Key-Value abstraction via
// its client.DB reference. Nodes use this to allocate node and store
// IDs for bootstrapping the node itself or new stores as they're added
// on subsequent instantiations.
type Node struct {
stopper *stop.Stopper
clusterID *base.ClusterIDContainer // UUID for Cockroach cluster
Descriptor roachpb.NodeDescriptor // Node ID, network/physical topology
storeCfg kvserver.StoreConfig // Config to use and pass to stores
eventLogger sql.EventLogger
stores *kvserver.Stores // Access to node-local stores
metrics nodeMetrics
recorder *status.MetricsRecorder
startedAt int64
lastUp int64
initialBoot bool // True if this is the first time this node has started.
txnMetrics kvcoord.TxnMetrics
perReplicaServer kvserver.Server
}
// allocateNodeID increments the node id generator key to allocate
// a new, unique node id.
func allocateNodeID(ctx context.Context, db *kv.DB) (roachpb.NodeID, error) {
val, err := kv.IncrementValRetryable(ctx, db, keys.NodeIDGenerator, 1)
if err != nil {
return 0, errors.Wrap(err, "unable to allocate node ID")
}
return roachpb.NodeID(val), nil
}
// allocateStoreIDs increments the store id generator key for the
// specified node to allocate count new, unique store ids. The
// first ID in a contiguous range is returned on success.
func allocateStoreIDs(
ctx context.Context, nodeID roachpb.NodeID, count int64, db *kv.DB,
) (roachpb.StoreID, error) {
val, err := kv.IncrementValRetryable(ctx, db, keys.StoreIDGenerator, count)
if err != nil {
return 0, errors.Wrapf(err, "unable to allocate %d store IDs for node %d", count, nodeID)
}
return roachpb.StoreID(val - count + 1), nil
}
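// storeIDsInBlock is a hypothetical helper sketch (not in the original file)
// spelling out the contract of allocateStoreIDs: the allocated IDs form the
// contiguous block firstID, firstID+1, ..., firstID+count-1, which is how
// bootstrapStores consumes them below.
func storeIDsInBlock(firstID roachpb.StoreID, count int64) []roachpb.StoreID {
	ids := make([]roachpb.StoreID, 0, count)
	for i := int64(0); i < count; i++ {
		ids = append(ids, firstID+roachpb.StoreID(i))
	}
	return ids
}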
// GetBootstrapSchema returns the schema which will be used to bootstrap a new
// server.
func GetBootstrapSchema(
defaultZoneConfig *zonepb.ZoneConfig, defaultSystemZoneConfig *zonepb.ZoneConfig,
) sqlbase.MetadataSchema {
return sqlbase.MakeMetadataSchema(defaultZoneConfig, defaultSystemZoneConfig)
}
// bootstrapCluster initializes the passed-in engines for a new cluster.
// Returns the cluster ID.
//
// The first engine will contain ranges for various static split points (i.e.
// various system ranges and system tables). Note however that many of these
// ranges cannot be accessed by KV through regular means until the node liveness is
// written, since epoch-based leases cannot be granted until then. All other
// engines are initialized with their StoreIdent.
func bootstrapCluster(
ctx context.Context,
engines []storage.Engine,
bootstrapVersion clusterversion.ClusterVersion,
defaultZoneConfig *zonepb.ZoneConfig,
defaultSystemZoneConfig *zonepb.ZoneConfig,
) (*initState, error) {
clusterID := uuid.MakeV4()
// TODO(andrei): It'd be cool if this method wouldn't do anything to engines
// other than the first one, and let regular node startup code deal with them.
for i, eng := range engines {
sIdent := roachpb.StoreIdent{
ClusterID: clusterID,
NodeID: FirstNodeID,
StoreID: roachpb.StoreID(i + 1),
}
// Initialize the engine backing the store with the store ident and cluster
// version.
if err := kvserver.InitEngine(ctx, eng, sIdent, bootstrapVersion); err != nil {
return nil, err
}
// Create first range, writing directly to engine. Note this does
// not create the range, just its data. Only do this if this is the
// first store.
if i == 0 {
schema := GetBootstrapSchema(defaultZoneConfig, defaultSystemZoneConfig)
initialValues, tableSplits := schema.GetInitialValues(bootstrapVersion)
splits := append(config.StaticSplits(), tableSplits...)
sort.Slice(splits, func(i, j int) bool {
return splits[i].Less(splits[j])
})
if err := kvserver.WriteInitialClusterData(
ctx, eng, initialValues,
bootstrapVersion.Version, len(engines), splits,
hlc.UnixNano(),
); err != nil {
return nil, err
}
}
}
state := &initState{
initDiskState: initDiskState{
nodeID: FirstNodeID,
clusterID: clusterID,
clusterVersion: bootstrapVersion,
initializedEngines: engines,
},
joined: true,
}
return state, nil
}
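// Illustrative usage sketch for bootstrapCluster (hypothetical, not in the
// original file); engines are assumed fresh and uninitialized:
//
//	state, err := bootstrapCluster(ctx, engines, binaryVersion, &zoneCfg, &sysZoneCfg)
//	// On success: state.nodeID == FirstNodeID, state.clusterID is a newly
//	// generated UUID, and only engines[0] carries the initial range data.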
// NewNode returns a new instance of Node.
//
// execCfg can be nil to help bootstrapping of a Server (the Node is created
// before the ExecutorConfig is initialized). In that case, InitLogger() needs
// to be called before the Node is used.
func NewNode(
cfg kvserver.StoreConfig,
recorder *status.MetricsRecorder,
reg *metric.Registry,
stopper *stop.Stopper,
txnMetrics kvcoord.TxnMetrics,
execCfg *sql.ExecutorConfig,
clusterID *base.ClusterIDContainer,
) *Node {
var eventLogger sql.EventLogger
if execCfg != nil {
eventLogger = sql.MakeEventLogger(execCfg)
}
n := &Node{
storeCfg: cfg,
stopper: stopper,
recorder: recorder,
metrics: makeNodeMetrics(reg, cfg.HistogramWindowInterval),
stores: kvserver.NewStores(
cfg.AmbientCtx, cfg.Clock,
cfg.Settings.Version.BinaryVersion(),
cfg.Settings.Version.BinaryMinSupportedVersion()),
txnMetrics: txnMetrics,
eventLogger: eventLogger,
clusterID: clusterID,
}
n.perReplicaServer = kvserver.MakeServer(&n.Descriptor, n.stores)
return n
}
// InitLogger needs to be called if a nil execCfg was passed to NewNode().
func (n *Node) InitLogger(execCfg *sql.ExecutorConfig) {
n.eventLogger = sql.MakeEventLogger(execCfg)
}
// String implements fmt.Stringer.
func (n *Node) String() string {
return fmt.Sprintf("node=%d", n.Descriptor.NodeID)
}
// AnnotateCtx is a convenience wrapper; see AmbientContext.
func (n *Node) AnnotateCtx(ctx context.Context) context.Context {
return n.storeCfg.AmbientCtx.AnnotateCtx(ctx)
}
// AnnotateCtxWithSpan is a convenience wrapper; see AmbientContext.
func (n *Node) AnnotateCtxWithSpan(
ctx context.Context, opName string,
) (context.Context, opentracing.Span) {
return n.storeCfg.AmbientCtx.AnnotateCtxWithSpan(ctx, opName)
}
func (n *Node) onClusterVersionChange(ctx context.Context, cv clusterversion.ClusterVersion) {
if err := n.stores.OnClusterVersionChange(ctx, cv); err != nil {
log.Fatal(ctx, errors.Wrapf(err, "updating cluster version to %v", cv))
}
}
// start starts the node by registering the storage instance for the
// RPC service "Node" and initializing stores for each specified
// engine. Launches periodic store gossiping in a goroutine.
// A callback can be optionally provided that will be invoked once this node's
// NodeDescriptor is available, to help bootstrapping.
func (n *Node) start(
ctx context.Context,
addr, sqlAddr net.Addr,
state initState,
clusterName string,
attrs roachpb.Attributes,
locality roachpb.Locality,
localityAddress []roachpb.LocalityAddress,
nodeDescriptorCallback func(descriptor roachpb.NodeDescriptor),
) error {
if err := clusterversion.Initialize(ctx, state.clusterVersion.Version, &n.storeCfg.Settings.SV); err != nil {
return err
}
// Obtaining the NodeID requires a dance of sorts. If the node has initialized
// stores, the NodeID is persisted in each of them. If not, then we'll need to
// use the KV store to get a NodeID assigned.
n.initialBoot = state.joined
nodeID := state.nodeID
if nodeID == 0 {
if !state.joined {
log.Fatalf(ctx, "node has no NodeID, but claims to not be joining cluster")
}
		// Allocate NodeID. Note that Gossip must already be connected: if there's
		// no NodeID yet, we had to connect Gossip to learn the ClusterID.
select {
case <-n.storeCfg.Gossip.Connected:
default:
log.Fatalf(ctx, "Gossip is not connected yet")
}
ctxWithSpan, span := n.AnnotateCtxWithSpan(ctx, "alloc-node-id")
newID, err := allocateNodeID(ctxWithSpan, n.storeCfg.DB)
if err != nil {
return err
}
log.Infof(ctxWithSpan, "new node allocated ID %d", newID)
span.Finish()
nodeID = newID
}
// Inform the RPC context of the node ID.
n.storeCfg.RPCContext.NodeID.Set(ctx, nodeID)
n.startedAt = n.storeCfg.Clock.Now().WallTime
n.Descriptor = roachpb.NodeDescriptor{
NodeID: nodeID,
Address: util.MakeUnresolvedAddr(addr.Network(), addr.String()),
SQLAddress: util.MakeUnresolvedAddr(sqlAddr.Network(), sqlAddr.String()),
Attrs: attrs,
Locality: locality,
LocalityAddress: localityAddress,
ClusterName: clusterName,
ServerVersion: n.storeCfg.Settings.Version.BinaryVersion(),
BuildTag: build.GetInfo().Tag,
StartedAt: n.startedAt,
}
// Invoke any passed in nodeDescriptorCallback as soon as it's available, to
// ensure that other components (currently the DistSQLPlanner) are initialized
// before store startup continues.
if nodeDescriptorCallback != nil {
nodeDescriptorCallback(n.Descriptor)
}
// Gossip the node descriptor to make this node addressable by node ID.
n.storeCfg.Gossip.NodeID.Set(ctx, n.Descriptor.NodeID)
if err := n.storeCfg.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
return errors.Errorf("couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
}
// Start the closed timestamp subsystem.
n.storeCfg.ClosedTimestamp.Start(n.Descriptor.NodeID)
// Create stores from the engines that were already bootstrapped.
for _, e := range state.initializedEngines {
s := kvserver.NewStore(ctx, n.storeCfg, e, &n.Descriptor)
if err := s.Start(ctx, n.stopper); err != nil {
return errors.Errorf("failed to start store: %s", err)
}
capacity, err := s.Capacity(false /* useCached */)
if err != nil {
return errors.Errorf("could not query store capacity: %s", err)
}
log.Infof(ctx, "initialized store %s: %+v", s, capacity)
n.addStore(s)
}
// Verify all initialized stores agree on cluster and node IDs.
if err := n.validateStores(ctx); err != nil {
return err
}
log.VEventf(ctx, 2, "validated stores")
// Compute the time this node was last up; this is done by reading the
// "last up time" from every store and choosing the most recent timestamp.
var mostRecentTimestamp hlc.Timestamp
if err := n.stores.VisitStores(func(s *kvserver.Store) error {
timestamp, err := s.ReadLastUpTimestamp(ctx)
if err != nil {
return err
}
if mostRecentTimestamp.Less(timestamp) {
mostRecentTimestamp = timestamp
}
return nil
}); err != nil {
return errors.Wrapf(err, "failed to read last up timestamp from stores")
}
n.lastUp = mostRecentTimestamp.WallTime
// Set the stores map as the gossip persistent storage, so that
// gossip can bootstrap using the most recently persisted set of
// node addresses.
if err := n.storeCfg.Gossip.SetStorage(n.stores); err != nil {
return fmt.Errorf("failed to initialize the gossip interface: %s", err)
}
// Read persisted ClusterVersion from each configured store to
// verify there are no stores with data too old or too new for this
// binary.
if _, err := n.stores.SynthesizeClusterVersion(ctx); err != nil {
return err
}
// Bootstrap any uninitialized stores.
//
// TODO(tbg): address https://github.com/cockroachdb/cockroach/issues/39415.
// Should be easy enough. Writing the test is probably most of the work.
if len(state.newEngines) > 0 {
if err := n.bootstrapStores(ctx, state.newEngines, n.stopper); err != nil {
return err
}
}
n.startComputePeriodicMetrics(n.stopper, DefaultMetricsSampleInterval)
// Now that we've created all our stores, install the gossip version update
// handler to write version updates to them.
// It's important that we persist new versions to the engines before the node
// starts using it, otherwise the node might regress the version after a
// crash.
clusterversion.SetBeforeChange(ctx, &n.storeCfg.Settings.SV, n.onClusterVersionChange)
// Invoke the callback manually once so that we persist the updated value that
// gossip might have already received.
clusterVersion := n.storeCfg.Settings.Version.ActiveVersion(ctx)
n.onClusterVersionChange(ctx, clusterVersion)
// Be careful about moving this line above `startStores`; store migrations rely
// on the fact that the cluster version has not been updated via Gossip (we
// have migrations that want to run only if the server starts with a given
// cluster version, but not if the server starts with a lower one and gets
// bumped immediately, which would be possible if gossip got started earlier).
n.startGossip(ctx, n.stopper)
allEngines := append([]storage.Engine(nil), state.initializedEngines...)
allEngines = append(allEngines, state.newEngines...)
log.Infof(ctx, "%s: started with %v engine(s) and attributes %v", n, allEngines, attrs.Attrs)
return nil
}
// IsDraining returns true if at least one Store housed on this Node is not
// currently allowing range leases to be procured or extended.
func (n *Node) IsDraining() bool {
var isDraining bool
if err := n.stores.VisitStores(func(s *kvserver.Store) error {
isDraining = isDraining || s.IsDraining()
return nil
}); err != nil {
panic(err)
}
return isDraining
}
// SetDraining sets the draining mode on all of the node's underlying stores.
// The reporter callback, if non-nil, is called on a best effort basis
// to report work that needed to be done and which may or may not have
// been done by the time this call returns. See the explanation in
// pkg/server/drain.go for details.
func (n *Node) SetDraining(drain bool, reporter func(int, string)) error {
return n.stores.VisitStores(func(s *kvserver.Store) error {
s.SetDraining(drain, reporter)
return nil
})
}
// SetHLCUpperBound sets the upper bound of the HLC wall time on all of the
// node's underlying stores.
func (n *Node) SetHLCUpperBound(ctx context.Context, hlcUpperBound int64) error {
return n.stores.VisitStores(func(s *kvserver.Store) error {
return s.WriteHLCUpperBound(ctx, hlcUpperBound)
})
}
func (n *Node) addStore(store *kvserver.Store) {
cv, err := store.GetClusterVersion(context.TODO())
if err != nil {
log.Fatal(context.TODO(), err)
}
if cv == (clusterversion.ClusterVersion{}) {
// The store should have had a version written to it during the store
// bootstrap process.
log.Fatal(context.TODO(), "attempting to add a store without a version")
}
n.stores.AddStore(store)
n.recorder.AddStore(store)
}
// validateStores iterates over all stores, verifying they agree on node ID.
// The node's ident is initialized based on the agreed-upon node ID. Note that
// cluster ID consistency is checked elsewhere in inspectEngines.
//
// TODO(tbg): remove this, we already validate everything in inspectEngines now.
func (n *Node) validateStores(ctx context.Context) error {
return n.stores.VisitStores(func(s *kvserver.Store) error {
if n.Descriptor.NodeID != s.Ident.NodeID {
return errors.Errorf("store %s node ID doesn't match node ID: %d", s, n.Descriptor.NodeID)
}
return nil
})
}
// bootstrapStores bootstraps uninitialized stores once the cluster
// and node IDs have been established for this node. Store IDs are
// allocated via a sequence id generator stored at a system key per
// node. The new stores are added to n.stores.
func (n *Node) bootstrapStores(
ctx context.Context, emptyEngines []storage.Engine, stopper *stop.Stopper,
) error {
if n.clusterID.Get() == uuid.Nil {
return errors.New("ClusterID missing during store bootstrap of auxiliary store")
}
// There's a bit of an awkward dance around cluster versions here. If this node
// is joining an existing cluster for the first time, it doesn't have any engines
// set up yet, and cv below will be the binary's minimum supported version.
// At the same time, the Gossip update which notifies us about the real
// cluster version won't persist it to any engines (because we haven't
// installed the gossip update handler yet and also because none of the
// stores are bootstrapped). So we just accept that we won't use the correct
// version here, but post-bootstrapping will invoke the callback manually,
// which will disseminate the correct version to all engines.
cv, err := n.stores.SynthesizeClusterVersion(ctx)
if err != nil {
return errors.Errorf("error retrieving cluster version for bootstrap: %s", err)
}
{
		// Bootstrap all waiting stores by allocating a new store id for
		// each and invoking kvserver.InitEngine() to persist it and the
		// cluster version, and then creating the stores.
inc := int64(len(emptyEngines))
firstID, err := allocateStoreIDs(ctx, n.Descriptor.NodeID, inc, n.storeCfg.DB)
if err != nil {
return errors.Errorf("error allocating store ids: %s", err)
}
sIdent := roachpb.StoreIdent{
ClusterID: n.clusterID.Get(),
NodeID: n.Descriptor.NodeID,
StoreID: firstID,
}
for _, eng := range emptyEngines {
if err := kvserver.InitEngine(ctx, eng, sIdent, cv); err != nil {
return err
}
s := kvserver.NewStore(ctx, n.storeCfg, eng, &n.Descriptor)
if err := s.Start(ctx, stopper); err != nil {
return err
}
n.addStore(s)
log.Infof(ctx, "bootstrapped store %s", s)
// Done regularly in Node.startGossip, but this cuts down the time
// until this store is used for range allocations.
if err := s.GossipStore(ctx, false /* useCached */); err != nil {
log.Warningf(ctx, "error doing initial gossiping: %s", err)
}
sIdent.StoreID++
}
}
	// Write a new status summary after all stores have been bootstrapped; this
// helps the UI remain responsive when new nodes are added.
if err := n.writeNodeStatus(ctx, 0 /* alertTTL */); err != nil {
log.Warningf(ctx, "error writing node summary after store bootstrap: %s", err)
}
return nil
}
// startGossip loops on a periodic ticker to gossip node-related
// information. Starts a goroutine to loop until the node is closed.
func (n *Node) startGossip(ctx context.Context, stopper *stop.Stopper) {
ctx = n.AnnotateCtx(ctx)
stopper.RunWorker(ctx, func(ctx context.Context) {
// Verify we've already gossiped our node descriptor.
//
// TODO(tbg): see if we really needed to do this earlier already. We
// probably needed to (this call has to come late for ... reasons I
// still need to look into) and nobody can talk to this node until
// the descriptor is in Gossip.
if _, err := n.storeCfg.Gossip.GetNodeDescriptor(n.Descriptor.NodeID); err != nil {
panic(err)
}
// NB: Gossip may not be connected at this point. That's fine though,
// we can still gossip something; Gossip sends it out reactively once
// it can.
statusTicker := time.NewTicker(gossipStatusInterval)
storesTicker := time.NewTicker(gossip.StoresInterval)
nodeTicker := time.NewTicker(gossip.NodeDescriptorInterval)
defer storesTicker.Stop()
defer nodeTicker.Stop()
n.gossipStores(ctx) // one-off run before going to sleep
for {
select {
case <-statusTicker.C:
n.storeCfg.Gossip.LogStatus()
case <-storesTicker.C:
n.gossipStores(ctx)
case <-nodeTicker.C:
if err := n.storeCfg.Gossip.SetNodeDescriptor(&n.Descriptor); err != nil {
log.Warningf(ctx, "couldn't gossip descriptor for node %d: %s", n.Descriptor.NodeID, err)
}
case <-stopper.ShouldStop():
return
}
}
})
}
// gossipStores broadcasts each store and dead replica to the gossip network.
func (n *Node) gossipStores(ctx context.Context) {
if err := n.stores.VisitStores(func(s *kvserver.Store) error {
return s.GossipStore(ctx, false /* useCached */)
}); err != nil {
log.Warning(ctx, err)
}
}
// startComputePeriodicMetrics starts a loop which periodically instructs each
// store to compute the value of metrics which cannot be incrementally
// maintained.
func (n *Node) startComputePeriodicMetrics(stopper *stop.Stopper, interval time.Duration) {
ctx := n.AnnotateCtx(context.Background())
stopper.RunWorker(ctx, func(ctx context.Context) {
// Compute periodic stats at the same frequency as metrics are sampled.
ticker := time.NewTicker(interval)
defer ticker.Stop()
for tick := 0; ; tick++ {
select {
case <-ticker.C:
if err := n.computePeriodicMetrics(ctx, tick); err != nil {
log.Errorf(ctx, "failed computing periodic metrics: %s", err)
}
case <-stopper.ShouldStop():
return
}
}
})
}
// computePeriodicMetrics instructs each store to compute the value of
// complicated metrics.
func (n *Node) computePeriodicMetrics(ctx context.Context, tick int) error {
return n.stores.VisitStores(func(store *kvserver.Store) error {
if err := store.ComputeMetrics(ctx, tick); err != nil {
log.Warningf(ctx, "%s: unable to compute metrics: %s", store, err)
}
return nil
})
}
func (n *Node) startGraphiteStatsExporter(st *cluster.Settings) {
ctx := logtags.AddTag(n.AnnotateCtx(context.Background()), "graphite stats exporter", nil)
pm := metric.MakePrometheusExporter()
n.stopper.RunWorker(ctx, func(ctx context.Context) {
var timer timeutil.Timer
defer timer.Stop()
for {
timer.Reset(graphiteInterval.Get(&st.SV))
select {
case <-n.stopper.ShouldStop():
return
case <-timer.C:
timer.Read = true
endpoint := graphiteEndpoint.Get(&st.SV)
if endpoint != "" {
if err := n.recorder.ExportToGraphite(ctx, endpoint, &pm); err != nil {
log.Infof(ctx, "error pushing metrics to graphite: %s\n", err)
}
}
}
}
})
}
// startWriteNodeStatus begins periodically persisting status summaries for the
// node and its stores.
func (n *Node) startWriteNodeStatus(frequency time.Duration) {
ctx := logtags.AddTag(n.AnnotateCtx(context.Background()), "summaries", nil)
// Immediately record summaries once on server startup.
if err := n.writeNodeStatus(ctx, 0 /* alertTTL */); err != nil {
log.Warningf(ctx, "error recording initial status summaries: %s", err)
}
n.stopper.RunWorker(ctx, func(ctx context.Context) {
// Write a status summary immediately; this helps the UI remain
// responsive when new nodes are added.
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// Use an alertTTL of twice the ticker frequency. This makes sure that
// alerts don't disappear and reappear spuriously while at the same
// time ensuring that an alert doesn't linger for too long after having
// resolved.
if err := n.writeNodeStatus(ctx, 2*frequency); err != nil {
log.Warningf(ctx, "error recording status summaries: %s", err)
}
case <-n.stopper.ShouldStop():
return
}
}
})
}
// writeNodeStatus retrieves status summaries from the supplied
// NodeStatusRecorder and persists them to the cockroach data store.
func (n *Node) writeNodeStatus(ctx context.Context, alertTTL time.Duration) error {
var err error
if runErr := n.stopper.RunTask(ctx, "node.Node: writing summary", func(ctx context.Context) {
nodeStatus := n.recorder.GenerateNodeStatus(ctx)
if nodeStatus == nil {
return
}
if result := n.recorder.CheckHealth(ctx, *nodeStatus); len(result.Alerts) != 0 {
var numNodes int
if err := n.storeCfg.Gossip.IterateInfos(gossip.KeyNodeIDPrefix, func(k string, info gossip.Info) error {
numNodes++
return nil
}); err != nil {
log.Warning(ctx, err)
}
if numNodes > 1 {
// Avoid this warning on single-node clusters, which require special UX.
log.Warningf(ctx, "health alerts detected: %+v", result)
}
if err := n.storeCfg.Gossip.AddInfoProto(
gossip.MakeNodeHealthAlertKey(n.Descriptor.NodeID), &result, alertTTL,
); err != nil {
log.Warningf(ctx, "unable to gossip health alerts: %+v", result)
}
// TODO(tschottdorf): add a metric that we increment every time there are
// alerts. This can help understand how long the cluster has been in that
// state (since it'll be incremented every ~10s).
}
err = n.recorder.WriteNodeStatus(ctx, n.storeCfg.DB, *nodeStatus)
}); runErr != nil {
err = runErr
}
return err
}
// recordJoinEvent begins an asynchronous task which attempts to log a "node
// join" or "node restart" event. This query will retry until it succeeds or the
// server stops.
func (n *Node) recordJoinEvent() {
if !n.storeCfg.LogRangeEvents {
return
}
logEventType := sql.EventLogNodeRestart
lastUp := n.lastUp
if n.initialBoot {
logEventType = sql.EventLogNodeJoin
lastUp = n.startedAt
}
n.stopper.RunWorker(context.Background(), func(bgCtx context.Context) {
ctx, span := n.AnnotateCtxWithSpan(bgCtx, "record-join-event")
defer span.Finish()
retryOpts := base.DefaultRetryOptions()
retryOpts.Closer = n.stopper.ShouldStop()
for r := retry.Start(retryOpts); r.Next(); {
if err := n.storeCfg.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
return n.eventLogger.InsertEventRecord(
ctx,
txn,
logEventType,
int32(n.Descriptor.NodeID),
int32(n.Descriptor.NodeID),
struct {
Descriptor roachpb.NodeDescriptor
ClusterID uuid.UUID
StartedAt int64
LastUp int64
}{n.Descriptor, n.clusterID.Get(), n.startedAt, lastUp},
)
}); err != nil {
log.Warningf(ctx, "%s: unable to log %s event: %s", n, logEventType, err)
} else {
return
}
}
})
}
// If we receive a (proto-marshaled) roachpb.BatchRequest whose Requests contain
// a message type unknown to this node, we will end up with a zero entry in the
// slice. If we don't error out early, this breaks all sorts of assumptions and
// usually ends in a panic.
func checkNoUnknownRequest(reqs []roachpb.RequestUnion) *roachpb.UnsupportedRequestError {
for _, req := range reqs {
if req.GetValue() == nil {
return &roachpb.UnsupportedRequestError{}
}
}
return nil
}
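// Illustrative sketch (hypothetical, not in the original file): a zero-valued
// RequestUnion is exactly the shape checkNoUnknownRequest guards against,
// since its unset oneof makes GetValue() return nil.
//
//	var ru roachpb.RequestUnion // as decoded from an unknown message type
//	err := checkNoUnknownRequest([]roachpb.RequestUnion{ru}) // err != nil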
func (n *Node) batchInternal(
ctx context.Context, args *roachpb.BatchRequest,
) (*roachpb.BatchResponse, error) {
if detail := checkNoUnknownRequest(args.Requests); detail != nil {
var br roachpb.BatchResponse
br.Error = roachpb.NewError(detail)
return &br, nil
}
var br *roachpb.BatchResponse
if err := n.stopper.RunTaskWithErr(ctx, "node.Node: batch", func(ctx context.Context) error {
var finishSpan func(*roachpb.BatchResponse)
// Shadow ctx from the outer function. Written like this to pass the linter.
ctx, finishSpan = n.setupSpanForIncomingRPC(ctx, grpcutil.IsLocalRequestContext(ctx))
// NB: wrapped to delay br evaluation to its value when returning.
defer func() { finishSpan(br) }()
if log.HasSpanOrEvent(ctx) {
log.Eventf(ctx, "node received request: %s", args.Summary())
}
tStart := timeutil.Now()
var pErr *roachpb.Error
br, pErr = n.stores.Send(ctx, *args)
if pErr != nil {
br = &roachpb.BatchResponse{}
log.VErrEventf(ctx, 3, "%T", pErr.GetDetail())
}
if br.Error != nil {
panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
}
n.metrics.callComplete(timeutil.Since(tStart), pErr)
br.Error = pErr
return nil
}); err != nil {
return nil, err
}
return br, nil
}
// Batch implements the roachpb.InternalServer interface.
func (n *Node) Batch(
ctx context.Context, args *roachpb.BatchRequest,
) (*roachpb.BatchResponse, error) {
// NB: Node.Batch is called directly for "local" calls. We don't want to
// carry the associated log tags forward as doing so makes adding additional
// log tags more expensive and makes local calls differ from remote calls.
ctx = n.storeCfg.AmbientCtx.ResetAndAnnotateCtx(ctx)
br, err := n.batchInternal(ctx, args)
// We always return errors via BatchResponse.Error so structure is
// preserved; plain errors are presumed to be from the RPC
// framework and not from cockroach.
if err != nil {
if br == nil {
br = &roachpb.BatchResponse{}
}
if br.Error != nil {
log.Fatalf(
ctx, "attempting to return both a plain error (%s) and roachpb.Error (%s)", err, br.Error,
)
}
br.Error = roachpb.NewError(err)
}
return br, nil
}
// setupSpanForIncomingRPC takes a context and returns a derived context with a
// new span in it. Depending on the input context, that span might be a root
// span or a child span. If it is a child span, it might be a child span of a
// local or a remote span. Note that supporting both the "child of local span"
// and "child of remote span" cases are important, as this RPC can be called
// either through the network or directly if the caller is local.
//
// It returns the derived context and a cleanup function to be called when
// servicing the RPC is done. The cleanup function will close the span and, in
// case the span was the child of a remote span and "snowball tracing" was
// enabled on that parent span, it serializes the local trace into the
// BatchResponse. The cleanup function takes the BatchResponse in which the
// response is to serialized. The BatchResponse can be nil in case no response
// is to be returned to the rpc caller.
func (n *Node) setupSpanForIncomingRPC(
ctx context.Context, isLocalRequest bool,
) (context.Context, func(*roachpb.BatchResponse)) {
// The operation name matches the one created by the interceptor in the
// remoteTrace case below.
const opName = "/cockroach.roachpb.Internal/Batch"
var newSpan, grpcSpan opentracing.Span
if isLocalRequest {
// This is a local request which circumvented gRPC. Start a span now.
ctx, newSpan = tracing.ChildSpan(ctx, opName)
} else {
grpcSpan = opentracing.SpanFromContext(ctx)
if grpcSpan == nil {
// If tracing information was passed via gRPC metadata, the gRPC interceptor
// should have opened a span for us. If not, open a span now (if tracing is
// disabled, this will be a noop span).
newSpan = n.storeCfg.AmbientCtx.Tracer.(*tracing.Tracer).StartRootSpan(
opName, n.storeCfg.AmbientCtx.LogTags(), tracing.NonRecordableSpan,
)
ctx = opentracing.ContextWithSpan(ctx, newSpan)
}
}
finishSpan := func(br *roachpb.BatchResponse) {
if newSpan != nil {
newSpan.Finish()
}
if br == nil {
return
}
if grpcSpan != nil {
// If this is a "snowball trace", we'll need to return all the recorded
// spans in the BatchResponse at the end of the request.
// We don't want to do this if the operation is on the same host, in which
// case everything is already part of the same recording.
if rec := tracing.GetRecording(grpcSpan); rec != nil {
br.CollectedSpans = append(br.CollectedSpans, rec...)
}
}
}
return ctx, finishSpan
}
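// Illustrative usage sketch (this mirrors batchInternal above; it adds no new
// API): the returned cleanup must run exactly once, receiving the response
// (or nil) so that collected trace spans can be serialized into it.
//
//	ctx, finishSpan := n.setupSpanForIncomingRPC(ctx, grpcutil.IsLocalRequestContext(ctx))
//	defer func() { finishSpan(br) }()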
// RangeFeed implements the roachpb.InternalServer interface.
func (n *Node) RangeFeed(
args *roachpb.RangeFeedRequest, stream roachpb.Internal_RangeFeedServer,
) error {
growstack.Grow()
pErr := n.stores.RangeFeed(args, stream)
if pErr != nil {
var event roachpb.RangeFeedEvent
event.SetValue(&roachpb.RangeFeedError{
Error: *pErr,
})
return stream.Send(&event)
}
return nil
}
| pkg/server/node.go | 1 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.003296830225735903,
0.00025077120517380536,
0.00016155935009010136,
0.00017123878933489323,
0.0004243867006152868
] |
{
"id": 1,
"code_window": [
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient))))\n",
"\t}\n",
"\tif ctx.testingKnobs.UnaryClientInterceptor != nil {\n",
"\t\ttestingUnaryInterceptor := ctx.testingKnobs.UnaryClientInterceptor(target, class)\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient)),\n",
"\t\t\t\t// We use a decorator to set the \"node\" tag. All other spans get the\n",
"\t\t\t\t// node tag from context log tags.\n",
"\t\t\t\t//\n",
"\t\t\t\t// Unfortunately we cannot use the corresponding interceptor on the\n",
"\t\t\t\t// server-side of gRPC to set this tag on server spans because that\n",
"\t\t\t\t// interceptor runs too late - after a traced RPC's recording had\n",
"\t\t\t\t// already been collected. So, on the server-side, the equivalent code\n",
"\t\t\t\t// is in setupSpanForIncomingRPC().\n",
"\t\t\t\totgrpc.SpanDecorator(func(span opentracing.Span, _ string, _, _ interface{}, _ error) {\n",
"\t\t\t\t\tspan.SetTag(\"node\", ctx.NodeID.String())\n",
"\t\t\t\t})))\n"
],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 697
} | // Code generated by TestPretty. DO NOT EDIT.
// GENERATED FILE DO NOT EDIT
1:
-
SELECT
1
= 1,
(
1
+ 2
)
= (
3
* foo(
x
)
),
x
= y::INT8,
y[123]
= min(
z
),
a
= ANY (
SELECT
123
)
8:
--------
SELECT 1
= 1,
(
1
+ 2
)
= (
3
* foo(
x
)
),
x
= y::INT8,
y[123]
= min(
z
),
a
= ANY (
SELECT
123
)
13:
-------------
SELECT 1 = 1,
(
1 + 2
)
= (
3
* foo(
x
)
),
x
= y::INT8,
y[123]
= min(
z
),
a
= ANY (
SELECT
123
)
14:
--------------
SELECT 1 = 1,
(1 + 2)
= (
3
* foo(
x
)
),
x
= y::INT8,
y[123]
= min(
z
),
a
= ANY (
SELECT
123
)
16:
----------------
SELECT 1 = 1,
(1 + 2)
= (
3
* foo(
x
)
),
x
= y::INT8,
y[123]
= min(z),
a
= ANY (
SELECT
123
)
19:
-------------------
SELECT 1 = 1,
(1 + 2)
= (
3
* foo(
x
)
),
x = y::INT8,
y[123]
= min(z),
a
= ANY (
SELECT
123
)
20:
--------------------
SELECT 1 = 1,
(1 + 2)
= (
3
* foo(x)
),
x = y::INT8,
y[123]
= min(z),
a
= ANY (
SELECT
123
)
22:
----------------------
SELECT 1 = 1,
(1 + 2)
= (3 * foo(x)),
x = y::INT8,
y[123]
= min(z),
a
= ANY (
SELECT 123
)
23:
-----------------------
SELECT 1 = 1,
(1 + 2)
= (3 * foo(x)),
x = y::INT8,
y[123] = min(z),
a
= ANY (
SELECT 123
)
25:
-------------------------
SELECT 1 = 1,
(1 + 2)
= (3 * foo(x)),
x = y::INT8,
y[123] = min(z),
a
= ANY (SELECT 123)
27:
---------------------------
SELECT 1 = 1,
(1 + 2)
= (3 * foo(x)),
x = y::INT8,
y[123] = min(z),
a = ANY (SELECT 123)
30:
------------------------------
SELECT 1 = 1,
(1 + 2) = (3 * foo(x)),
x = y::INT8,
y[123] = min(z),
a = ANY (SELECT 123)
88:
----------------------------------------------------------------------------------------
SELECT 1 = 1, (1 + 2) = (3 * foo(x)), x = y::INT8, y[123] = min(z), a = ANY (SELECT 123)
| pkg/sql/sem/tree/testdata/pretty/comparison.align-deindent.golden | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.00017649993242230266,
0.00017331579874735326,
0.00016902945935726166,
0.00017329928232356906,
0.0000018453954453434562
] |
{
"id": 1,
"code_window": [
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient))))\n",
"\t}\n",
"\tif ctx.testingKnobs.UnaryClientInterceptor != nil {\n",
"\t\ttestingUnaryInterceptor := ctx.testingKnobs.UnaryClientInterceptor(target, class)\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient)),\n",
"\t\t\t\t// We use a decorator to set the \"node\" tag. All other spans get the\n",
"\t\t\t\t// node tag from context log tags.\n",
"\t\t\t\t//\n",
"\t\t\t\t// Unfortunately we cannot use the corresponding interceptor on the\n",
"\t\t\t\t// server-side of gRPC to set this tag on server spans because that\n",
"\t\t\t\t// interceptor runs too late - after a traced RPC's recording had\n",
"\t\t\t\t// already been collected. So, on the server-side, the equivalent code\n",
"\t\t\t\t// is in setupSpanForIncomingRPC().\n",
"\t\t\t\totgrpc.SpanDecorator(func(span opentracing.Span, _ string, _, _ interface{}, _ error) {\n",
"\t\t\t\t\tspan.SetTag(\"node\", ctx.NodeID.String())\n",
"\t\t\t\t})))\n"
],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 697
} | #!/usr/bin/env bash
set -euxo pipefail
write_teamcity_config() {
sudo -u agent tee /home/agent/conf/buildAgent.properties <<EOF
serverUrl=https://teamcity.cockroachdb.com
name=
workDir=../work
tempDir=../temp
systemDir=../system
EOF
}
# Avoid saving any Bash history.
HISTSIZE=0
# At the time of writing we really want 1.11, but that doesn't
# exist in the PPA yet.
GOVERS=1.10
# Add third-party APT repositories.
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0EBFCD88
cat > /etc/apt/sources.list.d/docker.list <<EOF
deb https://download.docker.com/linux/ubuntu xenial stable
EOF
apt-add-repository ppa:webupd8team/java
add-apt-repository ppa:gophers/archive
# Git 2.7, which ships with Xenial, has a bug where submodule metadata sometimes
# uses absolute paths instead of relative paths, which means the affected
# submodules cannot be mounted in Docker containers. Use the latest version of
# Git until we upgrade to a newer Ubuntu distribution.
add-apt-repository ppa:git-core/ppa
apt-get update --yes
# Auto-accept the Oracle Java license agreement.
debconf-set-selections <<< "oracle-java8-installer shared/accepted-oracle-license-v1-1 select true"
# Install the necessary dependencies. Keep this list small!
apt-get install --yes \
docker-ce \
docker-compose \
gnome-keyring \
git \
golang-${GOVERS} \
oracle-java8-installer \
unzip
# Installing gnome-keyring prevents the error described in
# https://github.com/moby/moby/issues/34048
# Link Go into the PATH; the PPA installs it into /usr/lib/go-1.x/bin.
ln -s /usr/lib/go-${GOVERS}/bin/go /usr/bin/go
# Add a user for the TeamCity agent with Docker rights.
adduser agent --disabled-password
adduser agent docker
# Download the TeamCity agent code and install its configuration.
# N.B.: This must be done as the agent user.
su - agent <<'EOF'
set -euxo pipefail
echo 'export GOPATH="$HOME"/work/.go' >> .profile && source .profile
wget https://teamcity.cockroachdb.com/update/buildAgent.zip
unzip buildAgent.zip
rm buildAgent.zip
# Cache the current version of the main Cockroach repository on the agent to
# speed up the first build. As of 2017-10-13, the main repository is 450MB (!).
# The other repositories we run CI on are small enough not to be worth caching,
# but feel free to add them if it becomes necessary.
#
# WARNING: This uses undocumented implementation details of TeamCity's Git
# alternate system.
git clone --bare https://github.com/cockroachdb/cockroach system/git/cockroach.git
cat > system/git/map <<EOS
https://github.com/cockroachdb/cockroach = cockroach.git
EOS
# For master and the last two releases, download the builder and acceptance
# containers.
repo="$GOPATH"/src/github.com/cockroachdb/cockroach
git clone --shared system/git/cockroach.git "$repo"
cd "$repo"
# Work around a bug in the builder's git version (at the time of writing)
# which would corrupt the submodule defs. Probably good to remove once the
# builder uses Ubuntu 18.04 or higher.
git submodule update --init --recursive
for branch in $(git branch --all --list --sort=-committerdate 'origin/release-*' | head -n1) master
do
git checkout "$branch"
COCKROACH_BUILDER_CCACHE=1 build/builder.sh make test testrace TESTS=-
# TODO(benesch): store the acceptanceversion somewhere more accessible.
docker pull $(git grep cockroachdb/acceptance -- '*.go' | sed -E 's/.*"([^"]*).*"/\1/') || true
done
cd -
EOF
write_teamcity_config
# Configure the TeamCity agent to start when the server starts.
#
# systemd will nuke the auto-upgrade process unless we mark the service as
# "oneshot". This has the unfortunate side-effect of making `systemctl start
# teamcity-agent` hang forever when run manually, but it at least works when the
# system starts the service at bootup.
#
# TODO(benesch): see if we can fix this with Type=forking, KillMode=process.
cat > /etc/systemd/system/teamcity-agent.service <<EOF
[Unit]
Description=TeamCity Build Agent
After=network.target
Requires=network.target
[Service]
Type=oneshot
RemainAfterExit=yes
User=agent
PIDFile=/home/agent/logs/buildAgent.pid
ExecStart=/home/agent/bin/agent.sh start
ExecStop=/home/agent/bin/agent.sh stop
SuccessExitStatus=0 143
[Install]
WantedBy=multi-user.target
EOF
systemctl enable teamcity-agent.service
# Boot the TeamCity agent so it can be upgraded by the server (i.e., download
# and install whatever plugins the server has installed) before we bake the
# image.
#
# WARNING: There seems to be no clean way to check when the upgrade is complete.
# As a hack, the string below seems to appear in the logs iff the upgrade is
# successful.
systemctl start teamcity-agent.service
until grep -q 'Updating agent parameters on the server' /home/agent/logs/teamcity-agent.log
do
echo .
sleep 5
done
# Re-write the TeamCity config to discard the name and authorization token
# assigned by the TeamCity server; otherwise, agents created from this image
# might look like unauthorized duplicates to the TeamCity server.
systemctl stop teamcity-agent.service
write_teamcity_config
# Prepare for imaging by removing unnecessary files.
rm -rf /home/agent/logs
apt-get clean
sync
| build/packer/teamcity-agent.sh | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.00017590854258742183,
0.0001727001363178715,
0.0001684223097981885,
0.00017302119522355497,
0.0000021902337721257936
] |
{
"id": 1,
"code_window": [
"\t\tunaryInterceptors = append(unaryInterceptors,\n",
"\t\t\totgrpc.OpenTracingClientInterceptor(tracer,\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient))))\n",
"\t}\n",
"\tif ctx.testingKnobs.UnaryClientInterceptor != nil {\n",
"\t\ttestingUnaryInterceptor := ctx.testingKnobs.UnaryClientInterceptor(target, class)\n"
],
"labels": [
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t\t\t// We use a SpanInclusionFunc to circumvent the interceptor's work when\n",
"\t\t\t\t// tracing is disabled. Otherwise, the interceptor causes an increase in\n",
"\t\t\t\t// the number of packets (even with an empty context!). See #17177.\n",
"\t\t\t\totgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient)),\n",
"\t\t\t\t// We use a decorator to set the \"node\" tag. All other spans get the\n",
"\t\t\t\t// node tag from context log tags.\n",
"\t\t\t\t//\n",
"\t\t\t\t// Unfortunately we cannot use the corresponding interceptor on the\n",
"\t\t\t\t// server-side of gRPC to set this tag on server spans because that\n",
"\t\t\t\t// interceptor runs too late - after a traced RPC's recording had\n",
"\t\t\t\t// already been collected. So, on the server-side, the equivalent code\n",
"\t\t\t\t// is in setupSpanForIncomingRPC().\n",
"\t\t\t\totgrpc.SpanDecorator(func(span opentracing.Span, _ string, _, _ interface{}, _ error) {\n",
"\t\t\t\t\tspan.SetTag(\"node\", ctx.NodeID.String())\n",
"\t\t\t\t})))\n"
],
"file_path": "pkg/rpc/context.go",
"type": "replace",
"edit_start_line_idx": 697
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// +build !windows
//lint:file-ignore Unconvert (redundant conversions are necessary for cross-platform compatibility)
package sysutil
import (
"fmt"
"math"
"os"
"syscall"
"golang.org/x/sys/unix"
)
// ProcessIdentity returns a string describing the user and group that this
// process is running as.
func ProcessIdentity() string {
return fmt.Sprintf("uid %d euid %d gid %d egid %d",
unix.Getuid(), unix.Geteuid(), unix.Getgid(), unix.Getegid())
}
// StatFS returns an FSInfo describing the named filesystem. It is only
// supported on Unix-like platforms.
func StatFS(path string) (*FSInfo, error) {
var fs unix.Statfs_t
if err := unix.Statfs(path, &fs); err != nil {
return nil, err
}
// Statfs_t's fields have different types on different platforms. Our FSInfo
// type uses int64s for all fields, so make sure the values returned by the OS
// will fit.
if uint64(fs.Bfree) > math.MaxInt64 ||
uint64(fs.Bavail) > math.MaxInt64 ||
uint64(fs.Blocks) > math.MaxInt64 ||
uint64(fs.Bsize) > math.MaxInt64 {
return nil, fmt.Errorf("statfs syscall returned unrepresentable value %#v", fs)
}
return &FSInfo{
FreeBlocks: int64(fs.Bfree),
AvailBlocks: int64(fs.Bavail),
TotalBlocks: int64(fs.Blocks),
BlockSize: int64(fs.Bsize),
}, nil
}
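
// Hedged illustration, not part of the original file: FSInfo is defined
// elsewhere in this package with the int64 fields populated above, so turning
// block counts into byte counts is plain multiplication. The helper name is
// hypothetical.
func exampleAvailableBytes(fi *FSInfo) int64 {
	// Available (non-reserved) capacity in bytes.
	return fi.AvailBlocks * fi.BlockSize
}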
// StatAndLinkCount wraps os.Stat, returning its result and, if the platform
// supports it, the link-count from the returned file info.
func StatAndLinkCount(path string) (os.FileInfo, int64, error) {
stat, err := os.Stat(path)
if err != nil {
return stat, 0, err
}
if sys := stat.Sys(); sys != nil {
if s, ok := sys.(*syscall.Stat_t); ok {
return stat, int64(s.Nlink), nil
}
}
return stat, 0, nil
}
// IsCrossDeviceLinkErrno checks whether the given error object (as
// extracted from an *os.LinkError) is a cross-device link/rename
// error.
func IsCrossDeviceLinkErrno(errno error) bool {
return errno == syscall.EXDEV
}
| pkg/util/sysutil/sysutil_unix.go | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.000190241466043517,
0.00017562700668349862,
0.00016632210463285446,
0.00017441896488890052,
0.000006810924332967261
] |
{
"id": 2,
"code_window": [
"\t\t\t\topName, n.storeCfg.AmbientCtx.LogTags(), tracing.NonRecordableSpan,\n",
"\t\t\t)\n",
"\t\t\tctx = opentracing.ContextWithSpan(ctx, newSpan)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tfinishSpan := func(br *roachpb.BatchResponse) {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t} else {\n",
"\t\t\tgrpcSpan.SetTag(\"node\", n.Descriptor.NodeID)\n"
],
"file_path": "pkg/server/node.go",
"type": "add",
"edit_start_line_idx": 954
} | // Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rpc
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"io"
"math"
"net"
"sync"
"sync/atomic"
"time"
circuit "github.com/cockroachdb/circuitbreaker"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/util/contextutil"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/growstack"
"github.com/cockroachdb/cockroach/pkg/util/grpcutil"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/netutil"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"golang.org/x/sync/syncmap"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
encodingproto "google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
)
func init() {
// Disable GRPC tracing. This retains a subset of messages for
// display on /debug/requests, which is very expensive for
// snapshots. Until we can be more selective about what is retained
// in traces, we must disable tracing entirely.
// https://github.com/grpc/grpc-go/issues/695
grpc.EnableTracing = false
}
const (
// The coefficient by which the maximum offset is multiplied to determine the
// maximum acceptable measurement latency.
maximumPingDurationMult = 2
)
const (
defaultWindowSize = 65535
initialWindowSize = defaultWindowSize * 32 // for an RPC
initialConnWindowSize = initialWindowSize * 16 // for a connection
)
// sourceAddr is the environment-provided local address for outgoing
// connections.
var sourceAddr = func() net.Addr {
const envKey = "COCKROACH_SOURCE_IP_ADDRESS"
if sourceAddr, ok := envutil.EnvString(envKey, 0); ok {
sourceIP := net.ParseIP(sourceAddr)
if sourceIP == nil {
panic(fmt.Sprintf("unable to parse %s '%s' as IP address", envKey, sourceAddr))
}
return &net.TCPAddr{
IP: sourceIP,
}
}
return nil
}()
var enableRPCCompression = envutil.EnvOrDefaultBool("COCKROACH_ENABLE_RPC_COMPRESSION", true)
// spanInclusionFuncForServer is used as a SpanInclusionFunc for the server-side
// of RPCs, deciding for which operations the gRPC opentracing interceptor should
// create a span.
func spanInclusionFuncForServer(
t *tracing.Tracer, parentSpanCtx opentracing.SpanContext, method string, req, resp interface{},
) bool {
	// Is the client tracing?
return (parentSpanCtx != nil && !tracing.IsNoopContext(parentSpanCtx)) ||
// Should we trace regardless of the client? This is useful for calls coming
// through the HTTP->RPC gateway (i.e. the AdminUI), where client is never
// tracing.
t.AlwaysTrace()
}
// spanInclusionFuncForClient is used as a SpanInclusionFunc for the client-side
// of RPCs, deciding for which operations the gRPC opentracing interceptor should
// create a span.
func spanInclusionFuncForClient(
parentSpanCtx opentracing.SpanContext, method string, req, resp interface{},
) bool {
return parentSpanCtx != nil && !tracing.IsNoopContext(parentSpanCtx)
}
func requireSuperUser(ctx context.Context) error {
// TODO(marc): grpc's authentication model (which gives credential access in
// the request handler) doesn't really fit with the current design of the
// security package (which assumes that TLS state is only given at connection
// time) - that should be fixed.
if grpcutil.IsLocalRequestContext(ctx) {
// This is an in-process request. Bypass authentication check.
} else if peer, ok := peer.FromContext(ctx); ok {
if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
certUsers, err := security.GetCertificateUsers(&tlsInfo.State)
if err != nil {
return err
}
// TODO(benesch): the vast majority of RPCs should be limited to just
			// NodeUser. This is not a security concern, as RootUser has access to
			// read and write all data; it is merely good hygiene. For example, there is
// no reason to permit the root user to send raw Raft RPCs.
if !security.ContainsUser(security.NodeUser, certUsers) &&
!security.ContainsUser(security.RootUser, certUsers) {
return errors.Errorf("user %s is not allowed to perform this RPC", certUsers)
}
}
} else {
return errors.New("internal authentication error: TLSInfo is not available in request context")
}
return nil
}
// NewServer is a thin wrapper around grpc.NewServer that registers a heartbeat
// service.
func NewServer(ctx *Context) *grpc.Server {
return NewServerWithInterceptor(ctx, nil)
}
// NewServerWithInterceptor is like NewServer, but accepts an additional
// interceptor which is called before streaming and unary RPCs and may inject an
// error.
func NewServerWithInterceptor(
ctx *Context, interceptor func(fullMethod string) error,
) *grpc.Server {
opts := []grpc.ServerOption{
// The limiting factor for lowering the max message size is the fact
// that a single large kv can be sent over the network in one message.
// Our maximum kv size is unlimited, so we need this to be very large.
//
// TODO(peter,tamird): need tests before lowering.
grpc.MaxRecvMsgSize(math.MaxInt32),
grpc.MaxSendMsgSize(math.MaxInt32),
// Adjust the stream and connection window sizes. The gRPC defaults are too
// low for high latency connections.
grpc.InitialWindowSize(initialWindowSize),
grpc.InitialConnWindowSize(initialConnWindowSize),
// The default number of concurrent streams/requests on a client connection
// is 100, while the server is unlimited. The client setting can only be
// controlled by adjusting the server value. Set a very large value for the
// server value so that we have no fixed limit on the number of concurrent
// streams/requests on either the client or server.
grpc.MaxConcurrentStreams(math.MaxInt32),
grpc.KeepaliveParams(serverKeepalive),
grpc.KeepaliveEnforcementPolicy(serverEnforcement),
// A stats handler to measure server network stats.
grpc.StatsHandler(&ctx.stats),
}
if !ctx.Insecure {
tlsConfig, err := ctx.GetServerTLSConfig()
if err != nil {
panic(err)
}
opts = append(opts, grpc.Creds(credentials.NewTLS(tlsConfig)))
}
var unaryInterceptor grpc.UnaryServerInterceptor
var streamInterceptor grpc.StreamServerInterceptor
if tracer := ctx.AmbientCtx.Tracer; tracer != nil {
// We use a SpanInclusionFunc to save a bit of unnecessary work when
// tracing is disabled.
unaryInterceptor = otgrpc.OpenTracingServerInterceptor(
tracer,
otgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(
func(
parentSpanCtx opentracing.SpanContext,
method string,
req, resp interface{}) bool {
// This anonymous func serves to bind the tracer for
// spanInclusionFuncForServer.
return spanInclusionFuncForServer(
tracer.(*tracing.Tracer), parentSpanCtx, method, req, resp)
})),
)
// TODO(tschottdorf): should set up tracing for stream-based RPCs as
// well. The otgrpc package has no such facility, but there's also this:
//
// https://github.com/grpc-ecosystem/go-grpc-middleware/tree/master/tracing/opentracing
}
// TODO(tschottdorf): when setting up the interceptors below, could make the
// functions a wee bit more performant by hoisting some of the nil checks
// out. Doubt measurements can tell the difference though.
if interceptor != nil {
prevUnaryInterceptor := unaryInterceptor
unaryInterceptor = func(
ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
) (interface{}, error) {
if err := interceptor(info.FullMethod); err != nil {
return nil, err
}
if prevUnaryInterceptor != nil {
return prevUnaryInterceptor(ctx, req, info, handler)
}
return handler(ctx, req)
}
}
if interceptor != nil {
prevStreamInterceptor := streamInterceptor
streamInterceptor = func(
srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler,
) error {
if err := interceptor(info.FullMethod); err != nil {
return err
}
if prevStreamInterceptor != nil {
return prevStreamInterceptor(srv, stream, info, handler)
}
return handler(srv, stream)
}
}
if !ctx.Insecure {
prevUnaryInterceptor := unaryInterceptor
unaryInterceptor = func(
ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
) (interface{}, error) {
if err := requireSuperUser(ctx); err != nil {
return nil, err
}
if prevUnaryInterceptor != nil {
return prevUnaryInterceptor(ctx, req, info, handler)
}
return handler(ctx, req)
}
prevStreamInterceptor := streamInterceptor
streamInterceptor = func(
srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler,
) error {
if err := requireSuperUser(stream.Context()); err != nil {
return err
}
if prevStreamInterceptor != nil {
return prevStreamInterceptor(srv, stream, info, handler)
}
return handler(srv, stream)
}
}
if unaryInterceptor != nil {
opts = append(opts, grpc.UnaryInterceptor(unaryInterceptor))
}
if streamInterceptor != nil {
opts = append(opts, grpc.StreamInterceptor(streamInterceptor))
}
s := grpc.NewServer(opts...)
RegisterHeartbeatServer(s, &HeartbeatService{
clock: ctx.LocalClock,
remoteClockMonitor: ctx.RemoteClocks,
clusterName: ctx.clusterName,
disableClusterNameVerification: ctx.disableClusterNameVerification,
clusterID: &ctx.ClusterID,
nodeID: &ctx.NodeID,
settings: ctx.settings,
testingAllowNamedRPCToAnonymousServer: ctx.TestingAllowNamedRPCToAnonymousServer,
})
return s
}
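
// Hedged sketch, not part of the original file: the interceptor-chaining shape
// used above, reduced to a helper. The function name and the `check` callback
// are hypothetical; only grpc types already imported here are used.
func exampleWrapUnaryInterceptor(
	check func(fullMethod string) error, prev grpc.UnaryServerInterceptor,
) grpc.UnaryServerInterceptor {
	return func(
		ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler,
	) (interface{}, error) {
		// Run the injected check first, then defer to the previous interceptor
		// (if any), and finally to the real handler.
		if err := check(info.FullMethod); err != nil {
			return nil, err
		}
		if prev != nil {
			return prev(ctx, req, info, handler)
		}
		return handler(ctx, req)
	}
}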
type heartbeatResult struct {
everSucceeded bool // true if the heartbeat has ever succeeded
err error // heartbeat error, initialized to ErrNotHeartbeated
}
// state is a helper to return the heartbeatState implied by a heartbeatResult.
func (hr heartbeatResult) state() (s heartbeatState) {
switch {
case !hr.everSucceeded && hr.err != nil:
s = heartbeatInitializing
case hr.everSucceeded && hr.err == nil:
s = heartbeatNominal
case hr.everSucceeded && hr.err != nil:
s = heartbeatFailed
}
return s
}
// Connection is a wrapper around grpc.ClientConn. It prevents the underlying
// connection from being used until it has been validated via heartbeat.
type Connection struct {
grpcConn *grpc.ClientConn
dialErr error // error while dialing; if set, connection is unusable
heartbeatResult atomic.Value // result of latest heartbeat
initialHeartbeatDone chan struct{} // closed after first heartbeat
stopper *stop.Stopper
// remoteNodeID implies checking the remote node ID. 0 when unknown,
// non-zero to check with remote node. This is constant throughout
// the lifetime of a Connection object.
remoteNodeID roachpb.NodeID
initOnce sync.Once
}
func newConnectionToNodeID(stopper *stop.Stopper, remoteNodeID roachpb.NodeID) *Connection {
c := &Connection{
initialHeartbeatDone: make(chan struct{}),
stopper: stopper,
remoteNodeID: remoteNodeID,
}
c.heartbeatResult.Store(heartbeatResult{err: ErrNotHeartbeated})
return c
}
// Connect returns the underlying grpc.ClientConn after it has been validated,
// or an error if dialing or validation fails.
func (c *Connection) Connect(ctx context.Context) (*grpc.ClientConn, error) {
if c.dialErr != nil {
return nil, c.dialErr
}
// Wait for initial heartbeat.
select {
case <-c.initialHeartbeatDone:
case <-c.stopper.ShouldStop():
return nil, errors.Errorf("stopped")
case <-ctx.Done():
return nil, ctx.Err()
}
// If connection is invalid, return latest heartbeat error.
h := c.heartbeatResult.Load().(heartbeatResult)
if !h.everSucceeded {
// If we've never succeeded, h.err will be ErrNotHeartbeated.
return nil, netutil.NewInitialHeartBeatFailedError(h.err)
}
return c.grpcConn, nil
}
// Health returns an error indicating the success or failure of the
// connection's latest heartbeat. Returns ErrNotHeartbeated if the
// first heartbeat has not completed.
func (c *Connection) Health() error {
return c.heartbeatResult.Load().(heartbeatResult).err
}
// Context contains the fields required by the rpc framework.
type Context struct {
*base.Config
AmbientCtx log.AmbientContext
LocalClock *hlc.Clock
breakerClock breakerClock
Stopper *stop.Stopper
RemoteClocks *RemoteClockMonitor
masterCtx context.Context
heartbeatInterval time.Duration
heartbeatTimeout time.Duration
HeartbeatCB func()
rpcCompression bool
localInternalClient roachpb.InternalClient
conns syncmap.Map
stats StatsHandler
ClusterID base.ClusterIDContainer
NodeID base.NodeIDContainer
settings *cluster.Settings
clusterName string
disableClusterNameVerification bool
metrics Metrics
// For unittesting.
BreakerFactory func() *circuit.Breaker
testingDialOpts []grpc.DialOption
testingKnobs ContextTestingKnobs
// For testing. See the comment on the same field in HeartbeatService.
TestingAllowNamedRPCToAnonymousServer bool
}
// connKey is used as a key in the Context.conns map.
// Connections that carry a different class but share a target and nodeID
// always map to distinct connections. Different remote node IDs get
// distinct *Connection objects to ensure that we don't mis-route RPC
// requests in the face of address reuse. Gossip connections and other
// non-Internal users of the Context are free to dial nodes without
// specifying a node ID (see GRPCUnvalidatedDial()); however, later calls to
// Dial with the same target and class and a node ID will create a new
// underlying connection. The inverse is not true: a connection dialed
// without a node ID will use an existing connection to a matching
// (targetAddr, class) pair.
type connKey struct {
targetAddr string
nodeID roachpb.NodeID
class ConnectionClass
}
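
// Hedged sketch, not part of the original file: the dual-key registration
// described above, in isolation. The address and node ID are hypothetical.
func exampleConnKeySharing() {
	var conns syncmap.Map
	c := &Connection{}
	keyed := connKey{targetAddr: "10.0.0.1:26257", nodeID: 7, class: DefaultClass}
	anon := connKey{targetAddr: "10.0.0.1:26257", nodeID: 0, class: DefaultClass}
	conns.Store(keyed, c)
	// Mirrors the LoadOrStore in grpcDialNodeInternal: anonymous dials to the
	// same (targetAddr, class) reuse the node-keyed connection.
	if v, loaded := conns.LoadOrStore(anon, c); loaded && v.(*Connection) != c {
		panic("anonymous key unexpectedly bound to a different connection")
	}
}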
// NewContext creates an rpc Context with the supplied values.
func NewContext(
ambient log.AmbientContext,
baseCtx *base.Config,
hlcClock *hlc.Clock,
stopper *stop.Stopper,
st *cluster.Settings,
) *Context {
return NewContextWithTestingKnobs(ambient, baseCtx, hlcClock, stopper, st,
ContextTestingKnobs{})
}
// NewContextWithTestingKnobs creates an rpc Context with the supplied values.
func NewContextWithTestingKnobs(
ambient log.AmbientContext,
baseCtx *base.Config,
hlcClock *hlc.Clock,
stopper *stop.Stopper,
st *cluster.Settings,
knobs ContextTestingKnobs,
) *Context {
if hlcClock == nil {
panic("nil clock is forbidden")
}
ctx := &Context{
AmbientCtx: ambient,
Config: baseCtx,
LocalClock: hlcClock,
breakerClock: breakerClock{
clock: hlcClock,
},
rpcCompression: enableRPCCompression,
settings: st,
clusterName: baseCtx.ClusterName,
disableClusterNameVerification: baseCtx.DisableClusterNameVerification,
testingKnobs: knobs,
}
var cancel context.CancelFunc
ctx.masterCtx, cancel = context.WithCancel(ambient.AnnotateCtx(context.Background()))
ctx.Stopper = stopper
ctx.heartbeatInterval = baseCtx.RPCHeartbeatInterval
ctx.RemoteClocks = newRemoteClockMonitor(
ctx.LocalClock, 10*ctx.heartbeatInterval, baseCtx.HistogramWindowInterval)
ctx.heartbeatTimeout = 2 * ctx.heartbeatInterval
ctx.metrics = makeMetrics()
stopper.RunWorker(ctx.masterCtx, func(context.Context) {
<-stopper.ShouldQuiesce()
cancel()
ctx.conns.Range(func(k, v interface{}) bool {
conn := v.(*Connection)
conn.initOnce.Do(func() {
// Make sure initialization is not in progress when we're removing the
// conn. We need to set the error in case we win the race against the
// real initialization code.
if conn.dialErr == nil {
conn.dialErr = &roachpb.NodeUnavailableError{}
}
})
ctx.removeConn(conn, k.(connKey))
return true
})
})
if knobs.ClusterID != nil {
ctx.ClusterID.Set(ctx.masterCtx, *knobs.ClusterID)
}
return ctx
}
// ClusterName retrieves the configured cluster name.
func (ctx *Context) ClusterName() string {
if ctx == nil {
// This is used in tests.
return "<MISSING RPC CONTEXT>"
}
return ctx.clusterName
}
// GetStatsMap returns a map of network statistics maintained by the
// internal stats handler. The map is from the remote network address
// (in string form) to an rpc.Stats object.
func (ctx *Context) GetStatsMap() *syncmap.Map {
return &ctx.stats.stats
}
// Metrics returns the Context's Metrics struct.
func (ctx *Context) Metrics() *Metrics {
return &ctx.metrics
}
// GetLocalInternalClientForAddr returns the context's internal batch client
// for target, if it exists.
func (ctx *Context) GetLocalInternalClientForAddr(
target string, nodeID roachpb.NodeID,
) roachpb.InternalClient {
if target == ctx.AdvertiseAddr && nodeID == ctx.NodeID.Get() {
return ctx.localInternalClient
}
return nil
}
type internalClientAdapter struct {
roachpb.InternalServer
}
func (a internalClientAdapter) Batch(
ctx context.Context, ba *roachpb.BatchRequest, _ ...grpc.CallOption,
) (*roachpb.BatchResponse, error) {
return a.InternalServer.Batch(ctx, ba)
}
type rangeFeedClientAdapter struct {
ctx context.Context
eventC chan *roachpb.RangeFeedEvent
errC chan error
}
// roachpb.Internal_RangeFeedServer methods.
func (a rangeFeedClientAdapter) Recv() (*roachpb.RangeFeedEvent, error) {
// Prioritize eventC. Both channels are buffered and the only guarantee we
// have is that once an error is sent on errC no other events will be sent
// on eventC again.
select {
case e := <-a.eventC:
return e, nil
case err := <-a.errC:
select {
case e := <-a.eventC:
a.errC <- err
return e, nil
default:
return nil, err
}
}
}
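
// Hedged sketch, not part of the original file: the drain-before-error pattern
// from Recv above, with simplified element types.
func exampleDrainBeforeError(eventC chan int, errC chan error) (int, error) {
	select {
	case e := <-eventC:
		return e, nil
	case err := <-errC:
		// An error was observed; still prefer any event that raced in, and
		// re-queue the error for the next call (errC is buffered above).
		select {
		case e := <-eventC:
			errC <- err
			return e, nil
		default:
			return 0, err
		}
	}
}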
// roachpb.Internal_RangeFeedServer methods.
func (a rangeFeedClientAdapter) Send(e *roachpb.RangeFeedEvent) error {
select {
case a.eventC <- e:
return nil
case <-a.ctx.Done():
return a.ctx.Err()
}
}
// grpc.ClientStream methods.
func (rangeFeedClientAdapter) Header() (metadata.MD, error) { panic("unimplemented") }
func (rangeFeedClientAdapter) Trailer() metadata.MD { panic("unimplemented") }
func (rangeFeedClientAdapter) CloseSend() error { panic("unimplemented") }
// grpc.ServerStream methods.
func (rangeFeedClientAdapter) SetHeader(metadata.MD) error { panic("unimplemented") }
func (rangeFeedClientAdapter) SendHeader(metadata.MD) error { panic("unimplemented") }
func (rangeFeedClientAdapter) SetTrailer(metadata.MD) { panic("unimplemented") }
// grpc.Stream methods.
func (a rangeFeedClientAdapter) Context() context.Context { return a.ctx }
func (rangeFeedClientAdapter) SendMsg(m interface{}) error { panic("unimplemented") }
func (rangeFeedClientAdapter) RecvMsg(m interface{}) error { panic("unimplemented") }
var _ roachpb.Internal_RangeFeedClient = rangeFeedClientAdapter{}
var _ roachpb.Internal_RangeFeedServer = rangeFeedClientAdapter{}
func (a internalClientAdapter) RangeFeed(
ctx context.Context, args *roachpb.RangeFeedRequest, _ ...grpc.CallOption,
) (roachpb.Internal_RangeFeedClient, error) {
ctx, cancel := context.WithCancel(ctx)
rfAdapter := rangeFeedClientAdapter{
ctx: ctx,
eventC: make(chan *roachpb.RangeFeedEvent, 128),
errC: make(chan error, 1),
}
go func() {
defer cancel()
err := a.InternalServer.RangeFeed(args, rfAdapter)
if err == nil {
err = io.EOF
}
rfAdapter.errC <- err
}()
return rfAdapter, nil
}
var _ roachpb.InternalClient = internalClientAdapter{}
// IsLocal returns true if the given InternalClient is local.
func IsLocal(iface roachpb.InternalClient) bool {
_, ok := iface.(internalClientAdapter)
return ok // internalClientAdapter is used for local connections.
}
// SetLocalInternalServer sets the context's local internal batch server.
func (ctx *Context) SetLocalInternalServer(internalServer roachpb.InternalServer) {
ctx.localInternalClient = internalClientAdapter{internalServer}
}
// removeConn removes the given connection from the pool. The supplied connKeys
// must represent *all* the keys under which the connection was shared.
func (ctx *Context) removeConn(conn *Connection, keys ...connKey) {
for _, key := range keys {
ctx.conns.Delete(key)
}
if log.V(1) {
log.Infof(ctx.masterCtx, "closing %+v", keys)
}
if grpcConn := conn.grpcConn; grpcConn != nil {
if err := grpcConn.Close(); err != nil && !grpcutil.IsClosedConnection(err) {
if log.V(1) {
log.Errorf(ctx.masterCtx, "failed to close client connection: %v", err)
}
}
}
}
// GRPCDialOptions returns the minimal `grpc.DialOption`s necessary to connect
// to a server created with `NewServer`.
//
// At the time of writing, this is being used for making net.Pipe-based
// connections, so only those options that affect semantics are included. In
// particular, performance tuning options are omitted. Decompression is
// necessarily included to support compression-enabled servers, and compression
// is included for symmetry. These choices are admittedly subjective.
func (ctx *Context) GRPCDialOptions() ([]grpc.DialOption, error) {
return ctx.grpcDialOptions("", DefaultClass)
}
// grpcDialOptions extends GRPCDialOptions to support a connection class for use
// with TestingKnobs.
func (ctx *Context) grpcDialOptions(
target string, class ConnectionClass,
) ([]grpc.DialOption, error) {
var dialOpts []grpc.DialOption
if ctx.Insecure {
dialOpts = append(dialOpts, grpc.WithInsecure())
} else {
tlsConfig, err := ctx.GetClientTLSConfig()
if err != nil {
return nil, err
}
dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
}
// The limiting factor for lowering the max message size is the fact
// that a single large kv can be sent over the network in one message.
// Our maximum kv size is unlimited, so we need this to be very large.
//
// TODO(peter,tamird): need tests before lowering.
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32),
grpc.MaxCallSendMsgSize(math.MaxInt32),
))
// Compression is enabled separately from decompression to allow staged
// rollout.
if ctx.rpcCompression {
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor((snappyCompressor{}).Name())))
}
var unaryInterceptors []grpc.UnaryClientInterceptor
if tracer := ctx.AmbientCtx.Tracer; tracer != nil {
// We use a SpanInclusionFunc to circumvent the interceptor's work when
// tracing is disabled. Otherwise, the interceptor causes an increase in
// the number of packets (even with an empty context!). See #17177.
unaryInterceptors = append(unaryInterceptors,
otgrpc.OpenTracingClientInterceptor(tracer,
otgrpc.IncludingSpans(otgrpc.SpanInclusionFunc(spanInclusionFuncForClient))))
}
if ctx.testingKnobs.UnaryClientInterceptor != nil {
testingUnaryInterceptor := ctx.testingKnobs.UnaryClientInterceptor(target, class)
if testingUnaryInterceptor != nil {
unaryInterceptors = append(unaryInterceptors, testingUnaryInterceptor)
}
}
dialOpts = append(dialOpts, grpc.WithChainUnaryInterceptor(unaryInterceptors...))
if ctx.testingKnobs.StreamClientInterceptor != nil {
testingStreamInterceptor := ctx.testingKnobs.StreamClientInterceptor(target, class)
if testingStreamInterceptor != nil {
dialOpts = append(dialOpts, grpc.WithStreamInterceptor(testingStreamInterceptor))
}
}
return dialOpts, nil
}
// growStackCodec wraps the default grpc/encoding/proto codec to detect
// BatchRequest rpcs and grow the stack prior to Unmarshaling.
type growStackCodec struct {
encoding.Codec
}
// Unmarshal detects BatchRequests and calls growstack.Grow before calling
// through to the underlying codec.
func (c growStackCodec) Unmarshal(data []byte, v interface{}) error {
if _, ok := v.(*roachpb.BatchRequest); ok {
growstack.Grow()
}
return c.Codec.Unmarshal(data, v)
}
// Install the growStackCodec over the default proto codec in order to grow the
// stack for BatchRequest RPCs prior to unmarshaling.
func init() {
protoCodec := encoding.GetCodec(encodingproto.Name)
encoding.RegisterCodec(growStackCodec{Codec: protoCodec})
}
// onlyOnceDialer implements the grpc.WithDialer interface but only
// allows a single connection attempt. If a reconnection is attempted,
// redialChan is closed to signal a higher-level retry loop. This
// ensures that our initial heartbeat (and its version/clusterID
// validation) occurs on every new connection.
type onlyOnceDialer struct {
syncutil.Mutex
dialed bool
closed bool
redialChan chan struct{}
}
func (ood *onlyOnceDialer) dial(ctx context.Context, addr string) (net.Conn, error) {
ood.Lock()
defer ood.Unlock()
if !ood.dialed {
ood.dialed = true
dialer := net.Dialer{
LocalAddr: sourceAddr,
}
return dialer.DialContext(ctx, "tcp", addr)
} else if !ood.closed {
ood.closed = true
close(ood.redialChan)
}
return nil, grpcutil.ErrCannotReuseClientConn
}
type dialerFunc func(context.Context, string) (net.Conn, error)
type artificialLatencyDialer struct {
dialerFunc dialerFunc
latencyMS int
}
func (ald *artificialLatencyDialer) dial(ctx context.Context, addr string) (net.Conn, error) {
conn, err := ald.dialerFunc(ctx, addr)
if err != nil {
return conn, err
}
return delayingConn{
Conn: conn,
latency: time.Duration(ald.latencyMS) * time.Millisecond,
readBuf: new(bytes.Buffer),
}, nil
}
type delayingListener struct {
net.Listener
}
// NewDelayingListener creates a net.Listener that introduces a set delay on its connections.
func NewDelayingListener(l net.Listener) net.Listener {
return delayingListener{Listener: l}
}
func (d delayingListener) Accept() (net.Conn, error) {
c, err := d.Listener.Accept()
if err != nil {
return nil, err
}
return delayingConn{
Conn: c,
		// Give the server's conn a default latency. The real value gets populated
		// as packets are exchanged across the delayingConnections.
latency: time.Duration(0) * time.Millisecond,
readBuf: new(bytes.Buffer),
}, nil
}
type delayingConn struct {
net.Conn
latency time.Duration
lastSendEnd time.Time
readBuf *bytes.Buffer
}
func (d delayingConn) Write(b []byte) (n int, err error) {
tNow := timeutil.Now()
if d.lastSendEnd.Before(tNow) {
d.lastSendEnd = tNow
}
hdr := delayingHeader{
Magic: magic,
ReadTime: d.lastSendEnd.Add(d.latency).UnixNano(),
Sz: int32(len(b)),
DelayMS: int32(d.latency / time.Millisecond),
}
if err := binary.Write(d.Conn, binary.BigEndian, hdr); err != nil {
return n, err
}
x, err := d.Conn.Write(b)
n += x
return n, err
}
func (d delayingConn) Read(b []byte) (n int, err error) {
if d.readBuf.Len() == 0 {
var hdr delayingHeader
if err := binary.Read(d.Conn, binary.BigEndian, &hdr); err != nil {
return 0, err
}
// If we somehow don't get our expected magic, throw an error.
if hdr.Magic != magic {
panic(errors.New("didn't get expected magic bytes header"))
			// TODO (rohany): I can't get this to work. I suspect the problem is
			// that the improperly parsed struct is not written back in the same
			// binary format it was read in. I tried sending the magic integer
			// over first and saw the same thing.
} else {
d.latency = time.Duration(hdr.DelayMS) * time.Millisecond
defer func() {
time.Sleep(timeutil.Until(timeutil.Unix(0, hdr.ReadTime)))
}()
if _, err := io.CopyN(d.readBuf, d.Conn, int64(hdr.Sz)); err != nil {
return 0, err
}
}
}
return d.readBuf.Read(b)
}
const magic = 0xfeedfeed
type delayingHeader struct {
Magic int64
ReadTime int64
Sz int32
DelayMS int32
}
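
// Hedged sketch, not part of the original file: round-trips a delayingHeader
// through a buffer to show the big-endian framing delayingConn relies on.
func exampleDelayingHeaderRoundTrip() error {
	var buf bytes.Buffer
	out := delayingHeader{Magic: magic, ReadTime: timeutil.Now().UnixNano(), Sz: 5, DelayMS: 10}
	if err := binary.Write(&buf, binary.BigEndian, out); err != nil {
		return err
	}
	var in delayingHeader
	if err := binary.Read(&buf, binary.BigEndian, &in); err != nil {
		return err
	}
	if in.Magic != magic {
		return errors.New("didn't get expected magic bytes header")
	}
	return nil
}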
// GRPCDialRaw calls grpc.Dial with options appropriate for the context.
// Unlike GRPCDialNode, it does not start an RPC heartbeat to validate the
// connection. This connection will not be reconnected automatically;
// the returned channel is closed when a reconnection is attempted.
// This method implies a DefaultClass ConnectionClass for the returned
// ClientConn.
func (ctx *Context) GRPCDialRaw(target string) (*grpc.ClientConn, <-chan struct{}, error) {
return ctx.grpcDialRaw(target, 0, DefaultClass)
}
func (ctx *Context) grpcDialRaw(
target string, remoteNodeID roachpb.NodeID, class ConnectionClass,
) (*grpc.ClientConn, <-chan struct{}, error) {
dialOpts, err := ctx.grpcDialOptions(target, class)
if err != nil {
return nil, nil, err
}
// Add a stats handler to measure client network stats.
dialOpts = append(dialOpts, grpc.WithStatsHandler(ctx.stats.newClient(target)))
dialOpts = append(dialOpts, grpc.WithBackoffMaxDelay(maxBackoff))
dialOpts = append(dialOpts, grpc.WithKeepaliveParams(clientKeepalive))
dialOpts = append(dialOpts,
grpc.WithInitialWindowSize(initialWindowSize),
grpc.WithInitialConnWindowSize(initialConnWindowSize))
dialer := onlyOnceDialer{
redialChan: make(chan struct{}),
}
dialerFunc := dialer.dial
if ctx.testingKnobs.ArtificialLatencyMap != nil {
latency := ctx.testingKnobs.ArtificialLatencyMap[target]
log.VEventf(ctx.masterCtx, 1, "Connecting to node %s (%d) with simulated latency %dms", target, remoteNodeID,
latency)
dialer := artificialLatencyDialer{
dialerFunc: dialerFunc,
latencyMS: latency,
}
dialerFunc = dialer.dial
}
dialOpts = append(dialOpts, grpc.WithContextDialer(dialerFunc))
	// Add testingDialOpts after our dialer because one of our tests
// uses a custom dialer (this disables the only-one-connection
// behavior and redialChan will never be closed).
dialOpts = append(dialOpts, ctx.testingDialOpts...)
if log.V(1) {
log.Infof(ctx.masterCtx, "dialing %s", target)
}
conn, err := grpc.DialContext(ctx.masterCtx, target, dialOpts...)
return conn, dialer.redialChan, err
}
// GRPCUnvalidatedDial uses GRPCDialNode and disables validation of the
// node ID between client and server. This function should only be
// used with the gossip client and CLI commands which can talk to any
// node. This method implies a SystemClass.
func (ctx *Context) GRPCUnvalidatedDial(target string) *Connection {
return ctx.grpcDialNodeInternal(target, 0, SystemClass)
}
// GRPCDialNode calls grpc.Dial with options appropriate for the
// context and class (see the comment on ConnectionClass).
//
// The remoteNodeID becomes a constraint on the expected node ID of
// the remote node; this is checked during heartbeats. The caller is
// responsible for ensuring the remote node ID is known prior to using
// this function.
func (ctx *Context) GRPCDialNode(
target string, remoteNodeID roachpb.NodeID, class ConnectionClass,
) *Connection {
if remoteNodeID == 0 && !ctx.TestingAllowNamedRPCToAnonymousServer {
log.Fatalf(context.TODO(), "invalid node ID 0 in GRPCDialNode()")
}
return ctx.grpcDialNodeInternal(target, remoteNodeID, class)
}
func (ctx *Context) grpcDialNodeInternal(
target string, remoteNodeID roachpb.NodeID, class ConnectionClass,
) *Connection {
thisConnKeys := []connKey{{target, remoteNodeID, class}}
value, ok := ctx.conns.Load(thisConnKeys[0])
if !ok {
value, _ = ctx.conns.LoadOrStore(thisConnKeys[0], newConnectionToNodeID(ctx.Stopper, remoteNodeID))
if remoteNodeID != 0 {
// If the first connection established at a target address is
// for a specific node ID, then we want to reuse that connection
// also for other dials (eg for gossip) which don't require a
// specific node ID. (We do this as an optimization to reduce
// the number of TCP connections alive between nodes. This is
// not strictly required for correctness.) This LoadOrStore will
// ensure we're registering the connection we just created for
// future use by these other dials.
//
// We need to be careful to unregister both connKeys when the
// connection breaks. Otherwise, we leak the entry below which
// "simulates" a hard network partition for anyone dialing without
// the nodeID (gossip).
//
// See:
// https://github.com/cockroachdb/cockroach/issues/37200
otherKey := connKey{target, 0, class}
if _, loaded := ctx.conns.LoadOrStore(otherKey, value); !loaded {
thisConnKeys = append(thisConnKeys, otherKey)
}
}
}
conn := value.(*Connection)
conn.initOnce.Do(func() {
// Either we kick off the heartbeat loop (and clean up when it's done),
// or we clean up the connKey entries immediately.
var redialChan <-chan struct{}
conn.grpcConn, redialChan, conn.dialErr = ctx.grpcDialRaw(target, remoteNodeID, class)
if conn.dialErr == nil {
if err := ctx.Stopper.RunTask(
ctx.masterCtx, "rpc.Context: grpc heartbeat", func(masterCtx context.Context) {
ctx.Stopper.RunWorker(masterCtx, func(masterCtx context.Context) {
err := ctx.runHeartbeat(conn, target, redialChan)
if err != nil && !grpcutil.IsClosedConnection(err) {
log.Errorf(masterCtx, "removing connection to %s due to error: %s", target, err)
}
ctx.removeConn(conn, thisConnKeys...)
})
}); err != nil {
conn.dialErr = err
}
}
if conn.dialErr != nil {
ctx.removeConn(conn, thisConnKeys...)
}
})
return conn
}
// NewBreaker creates a new circuit breaker properly configured for RPC
// connections. name is used internally for logging state changes of the
// returned breaker.
func (ctx *Context) NewBreaker(name string) *circuit.Breaker {
if ctx.BreakerFactory != nil {
return ctx.BreakerFactory()
}
return newBreaker(ctx.masterCtx, name, &ctx.breakerClock)
}
// ErrNotHeartbeated is returned by ConnHealth when we have not yet performed
// the first heartbeat.
var ErrNotHeartbeated = errors.New("not yet heartbeated")
func (ctx *Context) runHeartbeat(
conn *Connection, target string, redialChan <-chan struct{},
) (retErr error) {
ctx.metrics.HeartbeatLoopsStarted.Inc(1)
// setInitialHeartbeatDone is idempotent and is critical to notify Connect
// callers of the failure in the case where no heartbeat is ever sent.
state := updateHeartbeatState(&ctx.metrics, heartbeatNotRunning, heartbeatInitializing)
initialHeartbeatDone := false
setInitialHeartbeatDone := func() {
if !initialHeartbeatDone {
close(conn.initialHeartbeatDone)
initialHeartbeatDone = true
}
}
defer func() {
if retErr != nil {
ctx.metrics.HeartbeatLoopsExited.Inc(1)
}
updateHeartbeatState(&ctx.metrics, state, heartbeatNotRunning)
setInitialHeartbeatDone()
}()
maxOffset := ctx.LocalClock.MaxOffset()
maxOffsetNanos := maxOffset.Nanoseconds()
heartbeatClient := NewHeartbeatClient(conn.grpcConn)
var heartbeatTimer timeutil.Timer
defer heartbeatTimer.Stop()
// Give the first iteration a wait-free heartbeat attempt.
heartbeatTimer.Reset(0)
everSucceeded := false
for {
select {
case <-redialChan:
return grpcutil.ErrCannotReuseClientConn
case <-ctx.Stopper.ShouldQuiesce():
return nil
case <-heartbeatTimer.C:
heartbeatTimer.Read = true
}
if err := ctx.Stopper.RunTaskWithErr(ctx.masterCtx, "rpc heartbeat", func(goCtx context.Context) error {
// We re-mint the PingRequest to pick up any asynchronous update to clusterID.
clusterID := ctx.ClusterID.Get()
request := &PingRequest{
Addr: ctx.Addr,
MaxOffsetNanos: maxOffsetNanos,
ClusterID: &clusterID,
NodeID: conn.remoteNodeID,
ServerVersion: ctx.settings.Version.BinaryVersion(),
}
var response *PingResponse
sendTime := ctx.LocalClock.PhysicalTime()
ping := func(goCtx context.Context) (err error) {
// NB: We want the request to fail-fast (the default), otherwise we won't
// be notified of transport failures.
response, err = heartbeatClient.Ping(goCtx, request)
return err
}
var err error
if ctx.heartbeatTimeout > 0 {
err = contextutil.RunWithTimeout(goCtx, "rpc heartbeat", ctx.heartbeatTimeout, ping)
} else {
err = ping(goCtx)
}
if err == nil {
// We verify the cluster name on the initiator side (instead
				// of the heartbeat service side, as done for the cluster ID
// and node ID checks) so that the operator who is starting a
// new node in a cluster and mistakenly joins the wrong
// cluster gets a chance to see the error message on their
// management console.
if !ctx.disableClusterNameVerification && !response.DisableClusterNameVerification {
err = errors.Wrap(
checkClusterName(ctx.clusterName, response.ClusterName),
"cluster name check failed on ping response")
}
}
if err == nil {
err = errors.Wrap(
checkVersion(goCtx, ctx.settings, response.ServerVersion),
"version compatibility check failed on ping response")
}
if err == nil {
everSucceeded = true
receiveTime := ctx.LocalClock.PhysicalTime()
// Only update the clock offset measurement if we actually got a
// successful response from the server.
pingDuration := receiveTime.Sub(sendTime)
maxOffset := ctx.LocalClock.MaxOffset()
if pingDuration > maximumPingDurationMult*maxOffset {
request.Offset.Reset()
} else {
// Offset and error are measured using the remote clock reading
// technique described in
// http://se.inf.tu-dresden.de/pubs/papers/SRDS1994.pdf, page 6.
// However, we assume that drift and min message delay are 0, for
// now.
request.Offset.MeasuredAt = receiveTime.UnixNano()
request.Offset.Uncertainty = (pingDuration / 2).Nanoseconds()
remoteTimeNow := timeutil.Unix(0, response.ServerTime).Add(pingDuration / 2)
request.Offset.Offset = remoteTimeNow.Sub(receiveTime).Nanoseconds()
}
ctx.RemoteClocks.UpdateOffset(ctx.masterCtx, target, request.Offset, pingDuration)
if cb := ctx.HeartbeatCB; cb != nil {
cb()
}
}
hr := heartbeatResult{
everSucceeded: everSucceeded,
err: err,
}
state = updateHeartbeatState(&ctx.metrics, state, hr.state())
conn.heartbeatResult.Store(hr)
setInitialHeartbeatDone()
return nil
}); err != nil {
return err
}
heartbeatTimer.Reset(ctx.heartbeatInterval)
}
}
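
// Hedged sketch, not part of the original file: the clock-offset arithmetic
// from the heartbeat loop above, extracted as a pure function. Inputs are
// hypothetical; see the remote clock reading technique cited in the loop.
func exampleComputeOffset(
	sendTime, receiveTime time.Time, serverTimeNanos int64,
) (offsetNanos, uncertaintyNanos int64) {
	pingDuration := receiveTime.Sub(sendTime)
	// The server's clock reading is assumed to land mid-flight.
	remoteTimeNow := timeutil.Unix(0, serverTimeNanos).Add(pingDuration / 2)
	return remoteTimeNow.Sub(receiveTime).Nanoseconds(), (pingDuration / 2).Nanoseconds()
}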
| pkg/rpc/context.go | 1 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.007085392717272043,
0.0007420486072078347,
0.00016269854677375406,
0.00024339341325685382,
0.0010902966605499387
] |
{
"id": 2,
"code_window": [
"\t\t\t\topName, n.storeCfg.AmbientCtx.LogTags(), tracing.NonRecordableSpan,\n",
"\t\t\t)\n",
"\t\t\tctx = opentracing.ContextWithSpan(ctx, newSpan)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tfinishSpan := func(br *roachpb.BatchResponse) {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t} else {\n",
"\t\t\tgrpcSpan.SetTag(\"node\", n.Descriptor.NodeID)\n"
],
"file_path": "pkg/server/node.go",
"type": "add",
"edit_start_line_idx": 954
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvcoord
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/errors"
)
// txnSeqNumAllocator is a txnInterceptor in charge of allocating sequence
// numbers to all the individual requests in batches.
//
// Sequence numbers serve a few roles in the transaction model:
//
// 1. they are used to enforce an ordering between read and write operations in a
// single transaction that go to the same key. Each read request that travels
// through the interceptor is assigned the sequence number of the most recent
// write. Each write request that travels through the interceptor is assigned
// a sequence number larger than any previously allocated.
//
// This is true even for leaf transaction coordinators. In their case, they are
// provided the sequence number of the most recent write during construction.
// Because they only perform read operations and never issue writes, they assign
// each read this sequence number without ever incrementing their own counter.
// In this way, sequence numbers are maintained correctly across a distributed
// tree of transaction coordinators.
//
// 2. they are used to uniquely identify write operations. Because every write
// request is given a new sequence number, the tuple (txn_id, txn_epoch, seq)
// uniquely identifies a write operation across an entire cluster. This property
// is exploited when determining the status of an individual write by looking
// for its intent. We perform such an operation using the QueryIntent request
// type when pipelining transactional writes. We will do something similar
// during the recovery stage of implicitly committed transactions.
//
// 3. they are used to determine whether a batch contains the entire write set
// for a transaction. See BatchRequest.IsCompleteTransaction.
//
// 4. they are used to provide idempotency for replays and re-issues. The MVCC
// layer is sequence number-aware and ensures that reads at a given sequence
// number ignore writes in the same transaction at larger sequence numbers.
// Likewise, writes at a sequence number become no-ops if an intent with the
// same sequence is already present. If an intent with the same sequence is not
// already present but an intent with a larger sequence number is, an error is
// returned. Likewise, if an intent with the same sequence is present but its
// value is different than what we recompute, an error is returned.
//
type txnSeqNumAllocator struct {
wrapped lockedSender
// writeSeq is the current write seqnum, i.e. the value last assigned
// to a write operation in a batch. It remains at 0 until the first
// write operation is encountered.
writeSeq enginepb.TxnSeq
// readSeq is the sequence number at which to perform read-only
// operations when steppingModeEnabled is set.
readSeq enginepb.TxnSeq
// steppingModeEnabled indicates whether to operate in stepping mode
// or read-own-writes:
// - in read-own-writes, read-only operations read at the latest
// write seqnum.
// - when stepping, read-only operations read at a
// fixed readSeq.
steppingModeEnabled bool
}
// SendLocked is part of the txnInterceptor interface.
func (s *txnSeqNumAllocator) SendLocked(
ctx context.Context, ba roachpb.BatchRequest,
) (*roachpb.BatchResponse, *roachpb.Error) {
for _, ru := range ba.Requests {
req := ru.GetInner()
// Only increment the sequence number generator for requests that
// will leave intents or requests that will commit the transaction.
// This enables ba.IsCompleteTransaction to work properly.
if roachpb.IsIntentWrite(req) || req.Method() == roachpb.EndTxn {
s.writeSeq++
}
// Note: only read-only requests can operate at a past seqnum.
// Combined read/write requests (e.g. CPut) always read at the
// latest write seqnum.
oldHeader := req.Header()
oldHeader.Sequence = s.writeSeq
if s.steppingModeEnabled && roachpb.IsReadOnly(req) {
oldHeader.Sequence = s.readSeq
}
req.SetHeader(oldHeader)
}
return s.wrapped.SendLocked(ctx, ba)
}
// setWrapped is part of the txnInterceptor interface.
func (s *txnSeqNumAllocator) setWrapped(wrapped lockedSender) { s.wrapped = wrapped }
// populateLeafInputState is part of the txnInterceptor interface.
func (s *txnSeqNumAllocator) populateLeafInputState(tis *roachpb.LeafTxnInputState) {
tis.Txn.Sequence = s.writeSeq
tis.SteppingModeEnabled = s.steppingModeEnabled
tis.ReadSeqNum = s.readSeq
}
// initializeLeaf loads the read seqnum for a leaf transaction.
func (s *txnSeqNumAllocator) initializeLeaf(tis *roachpb.LeafTxnInputState) {
s.steppingModeEnabled = tis.SteppingModeEnabled
s.readSeq = tis.ReadSeqNum
}
// populateLeafFinalState is part of the txnInterceptor interface.
func (s *txnSeqNumAllocator) populateLeafFinalState(tfs *roachpb.LeafTxnFinalState) {}
// importLeafFinalState is part of the txnInterceptor interface.
func (s *txnSeqNumAllocator) importLeafFinalState(
ctx context.Context, tfs *roachpb.LeafTxnFinalState,
) {
}
// stepLocked bumps the read seqnum to the current write seqnum.
// Used by the TxnCoordSender's Step() method.
func (s *txnSeqNumAllocator) stepLocked(ctx context.Context) error {
if !s.steppingModeEnabled {
return errors.AssertionFailedf("stepping mode is not enabled")
}
if s.readSeq > s.writeSeq {
return errors.AssertionFailedf(
"cannot step() after mistaken initialization (%d,%d)", s.writeSeq, s.readSeq)
}
s.readSeq = s.writeSeq
return nil
}
// configureSteppingLocked configures the stepping mode.
//
// When enabling stepping from the non-enabled state, the read seqnum
// is set to the current write seqnum, as if a snapshot was taken at
// the point stepping was enabled.
//
// The read seqnum is otherwise not modified when trying to enable
// stepping when it was previously enabled already. This is the
// behavior needed to provide the documented API semantics of
// sender.ConfigureStepping() (see client/sender.go).
func (s *txnSeqNumAllocator) configureSteppingLocked(
newMode kv.SteppingMode,
) (prevMode kv.SteppingMode) {
prevEnabled := s.steppingModeEnabled
enabled := newMode == kv.SteppingEnabled
s.steppingModeEnabled = enabled
if !prevEnabled && enabled {
s.readSeq = s.writeSeq
}
prevMode = kv.SteppingDisabled
if prevEnabled {
prevMode = kv.SteppingEnabled
}
return prevMode
}
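
// Hedged sketch, not part of the original file: how the stepping fields
// interact, per the comments above. The allocator is constructed directly and
// the sequence values are hypothetical.
func exampleSteppingSemantics(ctx context.Context) {
	var s txnSeqNumAllocator
	s.configureSteppingLocked(kv.SteppingEnabled) // readSeq snapshots writeSeq (0)
	s.writeSeq = 3                                // pretend three writes were assigned
	// Reads still observe readSeq == 0 until a step advances it.
	if err := s.stepLocked(ctx); err == nil && s.readSeq != 3 {
		panic("stepLocked should bump readSeq to writeSeq")
	}
}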
// epochBumpedLocked is part of the txnInterceptor interface.
func (s *txnSeqNumAllocator) epochBumpedLocked() {
// Note: we do not touch steppingModeEnabled here: if stepping mode
// was enabled on the txn, it remains enabled.
s.writeSeq = 0
s.readSeq = 0
}
// createSavepointLocked is part of the txnReqInterceptor interface.
func (s *txnSeqNumAllocator) createSavepointLocked(ctx context.Context, sp *savepoint) {
sp.seqNum = s.writeSeq
}
// rollbackToSavepointLocked is part of the txnReqInterceptor interface.
func (*txnSeqNumAllocator) rollbackToSavepointLocked(context.Context, savepoint) {
// Nothing to restore. The seq nums keep increasing. The TxnCoordSender has
// added a range of sequence numbers to the ignored list.
}
// closeLocked is part of the txnInterceptor interface.
func (*txnSeqNumAllocator) closeLocked() {}
| pkg/kv/kvclient/kvcoord/txn_interceptor_seq_num_allocator.go | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.002441130578517914,
0.00036880021798424423,
0.00016225701256189495,
0.00017395468603353947,
0.0004965894040651619
] |
{
"id": 2,
"code_window": [
"\t\t\t\topName, n.storeCfg.AmbientCtx.LogTags(), tracing.NonRecordableSpan,\n",
"\t\t\t)\n",
"\t\t\tctx = opentracing.ContextWithSpan(ctx, newSpan)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tfinishSpan := func(br *roachpb.BatchResponse) {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t} else {\n",
"\t\t\tgrpcSpan.SetTag(\"node\", n.Descriptor.NodeID)\n"
],
"file_path": "pkg/server/node.go",
"type": "add",
"edit_start_line_idx": 954
} | // Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package execinfra
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/util"
)
// MetadataTestReceiver is a Processor complementary to MetadataTestSender; it
// checks that all metadata emitted by the latter is received.
type MetadataTestReceiver struct {
ProcessorBase
input RowSource
// trailingErrMeta stores the error metadata received from the input. We
// do not return this metadata immediately because metadata propagation errors
// are prioritized over query errors, which ensures that tests which expect a
// query error can still fail if they do not properly propagate metadata.
trailingErrMeta []execinfrapb.ProducerMetadata
senders []string
rowCounts map[string]rowNumCounter
}
type rowNumCounter struct {
expected, actual int32
seen util.FastIntSet
err error
}
var _ Processor = &MetadataTestReceiver{}
var _ RowSource = &MetadataTestReceiver{}
const metadataTestReceiverProcName = "meta receiver"
// NewMetadataTestReceiver creates a new MetadataTestReceiver.
func NewMetadataTestReceiver(
flowCtx *FlowCtx,
processorID int32,
input RowSource,
post *execinfrapb.PostProcessSpec,
output RowReceiver,
senders []string,
) (*MetadataTestReceiver, error) {
mtr := &MetadataTestReceiver{
input: input,
senders: senders,
rowCounts: make(map[string]rowNumCounter),
}
if err := mtr.Init(
mtr,
post,
input.OutputTypes(),
flowCtx,
processorID,
output,
nil, /* memMonitor */
ProcStateOpts{
InputsToDrain: []RowSource{input},
TrailingMetaCallback: func(context.Context) []execinfrapb.ProducerMetadata {
var trailingMeta []execinfrapb.ProducerMetadata
if mtr.rowCounts != nil {
if meta := mtr.checkRowNumMetadata(); meta != nil {
trailingMeta = append(trailingMeta, *meta)
}
}
mtr.InternalClose()
return trailingMeta
},
},
); err != nil {
return nil, err
}
return mtr, nil
}
// checkRowNumMetadata examines all of the received RowNum metadata to ensure
// that it has received exactly one of each expected RowNum. If the check
// detects dropped or repeated metadata, it returns error metadata. Otherwise,
// it returns nil.
func (mtr *MetadataTestReceiver) checkRowNumMetadata() *execinfrapb.ProducerMetadata {
defer func() { mtr.rowCounts = nil }()
if len(mtr.rowCounts) != len(mtr.senders) {
var missingSenders string
for _, sender := range mtr.senders {
if _, exists := mtr.rowCounts[sender]; !exists {
if missingSenders == "" {
missingSenders = sender
} else {
missingSenders += fmt.Sprintf(", %s", sender)
}
}
}
return &execinfrapb.ProducerMetadata{
Err: fmt.Errorf(
"expected %d metadata senders but found %d; missing %s",
len(mtr.senders), len(mtr.rowCounts), missingSenders,
),
}
}
for id, cnt := range mtr.rowCounts {
if cnt.err != nil {
return &execinfrapb.ProducerMetadata{Err: cnt.err}
}
if cnt.expected != cnt.actual {
return &execinfrapb.ProducerMetadata{
Err: fmt.Errorf(
"dropped metadata from sender %s: expected %d RowNum messages but got %d",
id, cnt.expected, cnt.actual),
}
}
for i := 0; i < int(cnt.expected); i++ {
if !cnt.seen.Contains(i) {
return &execinfrapb.ProducerMetadata{
Err: fmt.Errorf(
"dropped and repeated metadata from sender %s: have %d messages but missing RowNum #%d",
id, cnt.expected, i+1),
}
}
}
}
return nil
}
// Start is part of the RowSource interface.
func (mtr *MetadataTestReceiver) Start(ctx context.Context) context.Context {
mtr.input.Start(ctx)
return mtr.StartInternal(ctx, metadataTestReceiverProcName)
}
// Next is part of the RowSource interface.
//
// This implementation doesn't follow the usual patterns of other processors; it
// makes more limited use of the ProcessorBase's facilities because it needs to
// inspect metadata while draining.
func (mtr *MetadataTestReceiver) Next() (sqlbase.EncDatumRow, *execinfrapb.ProducerMetadata) {
for {
if mtr.State == StateTrailingMeta {
if meta := mtr.popTrailingMeta(); meta != nil {
return nil, meta
}
// If there's no more trailingMeta, we've moved to stateExhausted, and we
// might return some trailingErrMeta below.
}
if mtr.State == StateExhausted {
if len(mtr.trailingErrMeta) > 0 {
meta := mtr.trailingErrMeta[0]
mtr.trailingErrMeta = mtr.trailingErrMeta[1:]
return nil, &meta
}
return nil, nil
}
row, meta := mtr.input.Next()
if meta != nil {
if meta.RowNum != nil {
rowNum := meta.RowNum
rcnt, exists := mtr.rowCounts[rowNum.SenderID]
if !exists {
rcnt.expected = -1
}
if rcnt.err != nil {
return nil, meta
}
if rowNum.LastMsg {
if rcnt.expected != -1 {
rcnt.err = fmt.Errorf(
"repeated metadata from reader %s: received more than one RowNum with LastMsg set",
rowNum.SenderID)
mtr.rowCounts[rowNum.SenderID] = rcnt
return nil, meta
}
rcnt.expected = rowNum.RowNum
} else {
rcnt.actual++
rcnt.seen.Add(int(rowNum.RowNum - 1))
}
mtr.rowCounts[rowNum.SenderID] = rcnt
}
if meta.Err != nil {
// Keep track of the err in trailingErrMeta, which will be returned
// after everything else (including ProcessorBase.trailingMeta).
mtr.trailingErrMeta = append(mtr.trailingErrMeta, *meta)
continue
}
return nil, meta
}
if row == nil {
mtr.moveToTrailingMeta()
continue
}
// We don't use ProcessorBase.ProcessRowHelper() here because we need
// special handling for errors: this proc never starts draining in order for
// it to be as unintrusive as possible.
outRow, ok, err := mtr.Out.ProcessRow(mtr.Ctx, row)
if err != nil {
mtr.trailingMeta = append(mtr.trailingMeta, execinfrapb.ProducerMetadata{Err: err})
continue
}
if outRow == nil {
if !ok {
mtr.MoveToDraining(nil /* err */)
}
continue
}
// Swallow rows if we're draining.
if mtr.State == StateDraining {
continue
}
if !ok {
mtr.MoveToDraining(nil /* err */)
}
return outRow, nil
}
}
// ConsumerDone is part of the RowSource interface.
func (mtr *MetadataTestReceiver) ConsumerDone() {
mtr.input.ConsumerDone()
}
// ConsumerClosed is part of the RowSource interface.
func (mtr *MetadataTestReceiver) ConsumerClosed() {
	// The consumer is done; Next() will not be called again.
mtr.InternalClose()
}
| pkg/sql/execinfra/metadata_test_receiver.go | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.0003298532101325691,
0.00018201548664364964,
0.00016321097791660577,
0.00017182530427817255,
0.00003715078128152527
] |
{
"id": 2,
"code_window": [
"\t\t\t\topName, n.storeCfg.AmbientCtx.LogTags(), tracing.NonRecordableSpan,\n",
"\t\t\t)\n",
"\t\t\tctx = opentracing.ContextWithSpan(ctx, newSpan)\n",
"\t\t}\n",
"\t}\n",
"\n",
"\tfinishSpan := func(br *roachpb.BatchResponse) {\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\t} else {\n",
"\t\t\tgrpcSpan.SetTag(\"node\", n.Descriptor.NodeID)\n"
],
"file_path": "pkg/server/node.go",
"type": "add",
"edit_start_line_idx": 954
} | // Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package syncutil
import "testing"
const magic64 = 0xdeddeadbeefbeef
// Tests of correct behavior, without contention.
// The loop over power-of-two values is meant to
// ensure that the operations apply to the full word size.
// The struct fields x.before and x.after check that the
// operations do not extend past the full word size.
//
// Adapted from https://golang.org/src/sync/atomic/atomic_test.go
func TestAtomicFloat64(t *testing.T) {
var x struct {
before AtomicFloat64
i AtomicFloat64
after AtomicFloat64
}
x.before = magic64
x.after = magic64
for delta := uint64(1); delta+delta > delta; delta += delta {
e := float64(delta)
StoreFloat64(&x.i, e)
a := LoadFloat64(&x.i)
if a != e {
t.Fatalf("stored=%f got=%f", e, a)
}
}
if x.before != magic64 || x.after != magic64 {
t.Fatalf("wrong magic: %#x _ %#x != %#x _ %#x", x.before, x.after, uint64(magic64), uint64(magic64))
}
}
| pkg/util/syncutil/atomic_test.go | 0 | https://github.com/cockroachdb/cockroach/commit/e42ed18755caa44737a019ea13193eeca118ba2d | [
0.00017618582933209836,
0.00017232660320587456,
0.00016777828568592668,
0.000171618492458947,
0.0000028901342830067733
] |
{
"id": 0,
"code_window": [
" \"//federation/pkg/kubefed:go_default_library\",\n",
" \"//pkg/client/metrics/prometheus:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/util/logs:go_default_library\",\n",
" \"//pkg/version/prometheus:go_default_library\",\n",
" ],\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//pkg/version:go_default_library\",\n"
],
"file_path": "federation/cmd/kubefed/app/BUILD",
"type": "add",
"edit_start_line_idx": 18
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(madhusdancs):
// 1. Make printSuccess prepend protocol/scheme to the IPs/hostnames.
// 2. Separate etcd container from API server pod as a first step towards enabling HA.
// 3. Make API server and controller manager replicas customizable via the HA work.
package init
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"sort"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
triple "k8s.io/client-go/util/cert/triple"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/federation/apis/federation"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac"
client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/gcfg.v1"
)
const (
APIServerCN = "federation-apiserver"
ControllerManagerCN = "federation-controller-manager"
AdminCN = "admin"
HostClusterLocalDNSZoneName = "cluster.local."
APIServerNameSuffix = "apiserver"
CMNameSuffix = "controller-manager"
CredentialSuffix = "credentials"
KubeconfigNameSuffix = "kubeconfig"
// User name used by federation controller manager to make
// calls to federation API server.
ControllerManagerUser = "federation-controller-manager"
// Name of the ServiceAccount used by the federation controller manager
// to access the secrets in the host cluster.
ControllerManagerSA = "federation-controller-manager"
// Group name of the legacy/core API group
legacyAPIGroup = ""
lbAddrRetryInterval = 5 * time.Second
podWaitInterval = 2 * time.Second
apiserverServiceTypeFlag = "api-server-service-type"
apiserverAdvertiseAddressFlag = "api-server-advertise-address"
dnsProviderSecretName = "federation-dns-provider.conf"
apiServerSecurePortName = "https"
// Set the secure port to 8443 to avoid requiring root privileges
// to bind to port < 1000. The apiserver's service will still
// expose on port 443.
apiServerSecurePort = 8443
)
var (
init_long = templates.LongDesc(`
Initialize a federation control plane.
Federation control plane is hosted inside a Kubernetes
cluster. The host cluster must be specified using the
--host-cluster-context flag.`)
init_example = templates.Examples(`
# Initialize federation control plane for a federation
# named foo in the host cluster whose local kubeconfig
# context is bar.
kubefed init foo --host-cluster-context=bar`)
componentLabel = map[string]string{
"app": "federated-cluster",
}
apiserverSvcSelector = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
apiserverPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
controllerManagerPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-controller-manager",
}
hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
)
type initFederation struct {
commonOptions util.SubcommandOptions
options initFederationOptions
}
type initFederationOptions struct {
dnsZoneName string
image string
dnsProvider string
dnsProviderConfig string
etcdPVCapacity string
etcdPersistentStorage bool
dryRun bool
apiServerOverridesString string
apiServerOverrides map[string]string
controllerManagerOverridesString string
controllerManagerOverrides map[string]string
apiServerServiceTypeString string
apiServerServiceType v1.ServiceType
apiServerAdvertiseAddress string
apiServerEnableHTTPBasicAuth bool
apiServerEnableTokenAuth bool
}
func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {
defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())
flags.StringVar(&o.dnsZoneName, "dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
flags.StringVar(&o.image, "image", defaultImage, "Image to use for federation API server and controller manager binaries.")
flags.StringVar(&o.dnsProvider, "dns-provider", "", "Dns provider to be used for this deployment.")
flags.StringVar(&o.dnsProviderConfig, "dns-provider-config", "", "Config file path on local file system for configuring DNS provider.")
flags.StringVar(&o.etcdPVCapacity, "etcd-pv-capacity", "10Gi", "Size of persistent volume claim to be used for etcd.")
flags.BoolVar(&o.etcdPersistentStorage, "etcd-persistent-storage", true, "Use persistent volume for etcd. Defaults to 'true'.")
flags.BoolVar(&o.dryRun, "dry-run", false, "dry run without sending commands to server.")
flags.StringVar(&o.apiServerOverridesString, "apiserver-arg-overrides", "", "comma separated list of federation-apiserver arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.controllerManagerOverridesString, "controllermanager-arg-overrides", "", "comma separated list of federation-controller-manager arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.apiServerServiceTypeString, apiserverServiceTypeFlag, string(v1.ServiceTypeLoadBalancer), "The type of service to create for federation API server. Options: 'LoadBalancer' (default), 'NodePort'.")
flags.StringVar(&o.apiServerAdvertiseAddress, apiserverAdvertiseAddressFlag, "", "Preferred address to advertise api server nodeport service. Valid only if '"+apiserverServiceTypeFlag+"=NodePort'.")
flags.BoolVar(&o.apiServerEnableHTTPBasicAuth, "apiserver-enable-basic-auth", false, "Enables HTTP Basic authentication for the federation-apiserver. Defaults to false.")
flags.BoolVar(&o.apiServerEnableTokenAuth, "apiserver-enable-token-auth", false, "Enables token authentication for the federation-apiserver. Defaults to false.")
}
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
opts := &initFederation{}
cmd := &cobra.Command{
Use: "init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT",
Short: "init initializes a federation control plane",
Long: init_long,
Example: init_example,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(opts.Complete(cmd, args))
cmdutil.CheckErr(opts.Run(cmdOut, config))
},
}
flags := cmd.Flags()
opts.commonOptions.Bind(flags)
opts.options.Bind(flags)
return cmd
}
type entityKeyPairs struct {
ca *triple.KeyPair
server *triple.KeyPair
controllerManager *triple.KeyPair
admin *triple.KeyPair
}
type credentials struct {
username string
password string
token string
certEntKeyPairs *entityKeyPairs
}
// Complete ensures that options are valid and marshals them if necessary.
func (i *initFederation) Complete(cmd *cobra.Command, args []string) error {
if len(i.options.dnsProvider) == 0 {
return fmt.Errorf("--dns-provider is mandatory")
}
err := i.commonOptions.SetName(cmd, args)
if err != nil {
return err
}
i.options.apiServerServiceType = v1.ServiceType(i.options.apiServerServiceTypeString)
if i.options.apiServerServiceType != v1.ServiceTypeLoadBalancer && i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("invalid %s: %s, should be either %s or %s", apiserverServiceTypeFlag, i.options.apiServerServiceType, v1.ServiceTypeLoadBalancer, v1.ServiceTypeNodePort)
}
if i.options.apiServerAdvertiseAddress != "" {
ip := net.ParseIP(i.options.apiServerAdvertiseAddress)
if ip == nil {
return fmt.Errorf("invalid %s: %s, should be a valid ip address", apiserverAdvertiseAddressFlag, i.options.apiServerAdvertiseAddress)
}
if i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("%s should be passed only with '%s=NodePort'", apiserverAdvertiseAddressFlag, apiserverServiceTypeFlag)
}
}
i.options.apiServerOverrides, err = marshallOverrides(i.options.apiServerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --apiserver-arg-overrides: %v", err)
}
i.options.controllerManagerOverrides, err = marshallOverrides(i.options.controllerManagerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --controllermanager-arg-overrides: %v", err)
}
if i.options.dnsProviderConfig != "" {
if _, err := os.Stat(i.options.dnsProviderConfig); err != nil {
return fmt.Errorf("error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
return nil
}
// Run initializes a federation control plane.
// See the design doc in https://github.com/kubernetes/kubernetes/pull/34484
// for details.
func (i *initFederation) Run(cmdOut io.Writer, config util.AdminConfig) error {
hostFactory := config.ClusterFactory(i.commonOptions.Host, i.commonOptions.Kubeconfig)
hostClientset, err := hostFactory.ClientSet()
if err != nil {
return err
}
rbacAvailable := true
rbacVersionedClientset, err := util.GetVersionedClientForRBACOrFail(hostFactory)
if err != nil {
if _, ok := err.(*util.NoRBACAPIError); !ok {
return err
}
		// If the error is of type NoRBACAPIError, we continue to create the rest of
		// the resources, without the SA and roles (in the absence of RBAC support).
rbacAvailable = false
}
serverName := fmt.Sprintf("%s-%s", i.commonOptions.Name, APIServerNameSuffix)
serverCredName := fmt.Sprintf("%s-%s", serverName, CredentialSuffix)
cmName := fmt.Sprintf("%s-%s", i.commonOptions.Name, CMNameSuffix)
cmKubeconfigName := fmt.Sprintf("%s-%s", cmName, KubeconfigNameSuffix)
var dnsProviderConfigBytes []byte
if i.options.dnsProviderConfig != "" {
dnsProviderConfigBytes, err = ioutil.ReadFile(i.options.dnsProviderConfig)
if err != nil {
return fmt.Errorf("Error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
fmt.Fprintf(cmdOut, "Creating a namespace %s for federation system components...", i.commonOptions.FederationSystemNamespace)
glog.V(4).Infof("Creating a namespace %s for federation system components", i.commonOptions.FederationSystemNamespace)
_, err = createNamespace(hostClientset, i.commonOptions.Name, i.commonOptions.FederationSystemNamespace, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Creating federation control plane service...")
glog.V(4).Info("Creating federation control plane service")
svc, ips, hostnames, err := createService(cmdOut, hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.apiServerAdvertiseAddress, i.options.apiServerServiceType, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Infof("Created service named %s with IP addresses %v, hostnames %v", svc.Name, ips, hostnames)
fmt.Fprint(cmdOut, "Creating federation control plane objects (credentials, persistent volume claim)...")
glog.V(4).Info("Generating TLS certificates and credentials for communicating with the federation API server")
credentials, err := generateCredentials(i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, HostClusterLocalDNSZoneName, serverCredName, ips, hostnames, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.dryRun)
if err != nil {
return err
}
// Create the secret containing the credentials.
_, err = createAPIServerCredentialsSecret(hostClientset, i.commonOptions.FederationSystemNamespace, serverCredName, i.commonOptions.Name, credentials, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Certificates and credentials generated")
glog.V(4).Info("Creating an entry in the kubeconfig file with the certificate and credential data")
_, err = createControllerManagerKubeconfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmKubeconfigName, credentials.certEntKeyPairs, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Credentials secret successfully created")
glog.V(4).Info("Creating a persistent volume and a claim to store the federation API server's state, including etcd data")
var pvc *api.PersistentVolumeClaim
if i.options.etcdPersistentStorage {
pvc, err = createPVC(hostClientset, i.commonOptions.FederationSystemNamespace, svc.Name, i.commonOptions.Name, i.options.etcdPVCapacity, i.options.dryRun)
if err != nil {
return err
}
}
glog.V(4).Info("Persistent volume and claim created")
fmt.Fprintln(cmdOut, " done")
// Since only one IP address can be specified as advertise address,
// we arbitrarily pick the first available IP address
// Pick user provided apiserverAdvertiseAddress over other available IP addresses.
advertiseAddress := i.options.apiServerAdvertiseAddress
if advertiseAddress == "" && len(ips) > 0 {
advertiseAddress = ips[0]
}
fmt.Fprint(cmdOut, "Creating federation component deployments...")
glog.V(4).Info("Creating federation control plane components")
_, err = createAPIServer(hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.image, advertiseAddress, serverCredName, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.apiServerOverrides, pvc, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation API server")
sa := &api.ServiceAccount{}
sa.Name = ""
// Create a service account and related RBAC roles if the host cluster has RBAC support.
// TODO: We must evaluate creating a separate service account even when RBAC support is missing
if rbacAvailable {
glog.V(4).Info("Creating service account for federation controller manager in the host cluster")
sa, err = createControllerManagerSA(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager service account")
glog.V(4).Info("Creating RBAC role and role bindings for the federation controller manager's service account")
_, _, err = createRoleBindings(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, sa.Name, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created RBAC role and role bindings")
}
glog.V(4).Info("Creating a DNS provider config secret")
dnsProviderSecret, err := createDNSProviderConfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, dnsProviderSecretName, i.commonOptions.Name, dnsProviderConfigBytes, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created DNS provider config secret")
glog.V(4).Info("Creating federation controller manager deployment")
_, err = createControllerManager(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmName, i.options.image, cmKubeconfigName, i.options.dnsZoneName, i.options.dnsProvider, i.options.dnsProviderConfig, sa.Name, dnsProviderSecret, i.options.controllerManagerOverrides, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager deployment")
	fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Updating kubeconfig...")
glog.V(4).Info("Updating kubeconfig")
// Pick the first ip/hostname to update the api server endpoint in kubeconfig and also to give information to user
// In case of NodePort Service for api server, ips are node external ips.
endpoint := ""
if len(ips) > 0 {
endpoint = ips[0]
} else if len(hostnames) > 0 {
endpoint = hostnames[0]
}
// If the service is nodeport, need to append the port to endpoint as it is non-standard port
if i.options.apiServerServiceType == v1.ServiceTypeNodePort {
endpoint = endpoint + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
}
err = updateKubeconfig(config, i.commonOptions.Name, endpoint, i.commonOptions.Kubeconfig, credentials, i.options.dryRun)
if err != nil {
glog.V(4).Infof("Failed to update kubeconfig: %v", err)
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Info("Successfully updated kubeconfig")
if !i.options.dryRun {
fmt.Fprint(cmdOut, "Waiting for federation control plane to come up...")
glog.V(4).Info("Waiting for federation control plane to come up")
fedPods := []string{serverName, cmName}
err = waitForPods(cmdOut, hostClientset, fedPods, i.commonOptions.FederationSystemNamespace)
if err != nil {
return err
}
err = waitSrvHealthy(cmdOut, config, i.commonOptions.Name, i.commonOptions.Kubeconfig)
if err != nil {
return err
}
glog.V(4).Info("Federation control plane running")
fmt.Fprintln(cmdOut, " done")
return printSuccess(cmdOut, ips, hostnames, svc)
}
_, err = fmt.Fprintln(cmdOut, "Federation control plane runs (dry run)")
glog.V(4).Info("Federation control plane runs (dry run)")
return err
}
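// createNamespace creates the federation system namespace in the host
// cluster, annotated with the federation name.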
func createNamespace(clientset client.Interface, federationName, namespace string, dryRun bool) (*api.Namespace, error) {
ns := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return ns, nil
}
return clientset.Core().Namespaces().Create(ns)
}
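// createService creates the federation API server service in the host cluster
// and returns it together with the IP addresses and hostnames at which the
// API server will be reachable.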
func createService(cmdOut io.Writer, clientset client.Interface, namespace, svcName, federationName, apiserverAdvertiseAddress string, apiserverServiceType v1.ServiceType, dryRun bool) (*api.Service, []string, []string, error) {
svc := &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.ServiceSpec{
Type: api.ServiceType(apiserverServiceType),
Selector: apiserverSvcSelector,
Ports: []api.ServicePort{
{
Name: "https",
Protocol: "TCP",
Port: 443,
TargetPort: intstr.FromString(apiServerSecurePortName),
},
},
},
}
if dryRun {
return svc, nil, nil, nil
}
var err error
svc, err = clientset.Core().Services(namespace).Create(svc)
ips := []string{}
hostnames := []string{}
if apiserverServiceType == v1.ServiceTypeLoadBalancer {
ips, hostnames, err = waitForLoadBalancerAddress(cmdOut, clientset, svc, dryRun)
} else {
if apiserverAdvertiseAddress != "" {
ips = append(ips, apiserverAdvertiseAddress)
} else {
ips, err = getClusterNodeIPs(clientset)
}
}
if err != nil {
return svc, nil, nil, err
}
return svc, ips, hostnames, err
}
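// getClusterNodeIPs lists the host cluster's nodes and returns their external
// IP addresses.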
func getClusterNodeIPs(clientset client.Interface) ([]string, error) {
preferredAddressTypes := []api.NodeAddressType{
api.NodeExternalIP,
}
nodeList, err := clientset.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
nodeAddresses := []string{}
for _, node := range nodeList.Items {
OuterLoop:
for _, addressType := range preferredAddressTypes {
for _, address := range node.Status.Addresses {
if address.Type == addressType {
nodeAddresses = append(nodeAddresses, address.Address)
break OuterLoop
}
}
}
}
return nodeAddresses, nil
}
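// waitForLoadBalancerAddress polls the service until its load balancer has
// been assigned at least one ingress IP address or hostname, and returns them.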
func waitForLoadBalancerAddress(cmdOut io.Writer, clientset client.Interface, svc *api.Service, dryRun bool) ([]string, []string, error) {
ips := []string{}
hostnames := []string{}
if dryRun {
return ips, hostnames, nil
}
err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
if ings := pollSvc.Status.LoadBalancer.Ingress; len(ings) > 0 {
for _, ing := range ings {
if len(ing.IP) > 0 {
ips = append(ips, ing.IP)
}
if len(ing.Hostname) > 0 {
hostnames = append(hostnames, ing.Hostname)
}
}
if len(ips) > 0 || len(hostnames) > 0 {
return true, nil
}
}
return false, nil
})
if err != nil {
return nil, nil, err
}
return ips, hostnames, nil
}
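// generateCredentials builds the admin credentials for the federation control
// plane, including the optional basic-auth password and token, and generates
// the TLS certificates used by its components.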
func generateCredentials(svcNamespace, name, svcName, localDNSZoneName, serverCredName string, ips, hostnames []string, enableHTTPBasicAuth, enableTokenAuth, dryRun bool) (*credentials, error) {
credentials := credentials{
username: AdminCN,
}
if enableHTTPBasicAuth {
credentials.password = string(uuid.NewUUID())
}
if enableTokenAuth {
credentials.token = string(uuid.NewUUID())
}
entKeyPairs, err := genCerts(svcNamespace, name, svcName, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, err
}
credentials.certEntKeyPairs = entKeyPairs
return &credentials, nil
}
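// genCerts creates a CA for the federation and uses it to sign the API
// server's serving key pair as well as client key pairs for the controller
// manager and the admin user.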
func genCerts(svcNamespace, name, svcName, localDNSZoneName string, ips, hostnames []string) (*entityKeyPairs, error) {
ca, err := triple.NewCA(name)
if err != nil {
return nil, fmt.Errorf("failed to create CA key and certificate: %v", err)
}
server, err := triple.NewServerKeyPair(ca, APIServerCN, svcName, svcNamespace, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, fmt.Errorf("failed to create federation API server key and certificate: %v", err)
}
cm, err := triple.NewClientKeyPair(ca, ControllerManagerCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create federation controller manager client key and certificate: %v", err)
}
admin, err := triple.NewClientKeyPair(ca, AdminCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create client key and certificate for an admin: %v", err)
}
return &entityKeyPairs{
ca: ca,
server: server,
controllerManager: cm,
admin: admin,
}, nil
}
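// createAPIServerCredentialsSecret stores the CA certificate, the API
// server's serving key pair, and any basic-auth or token files in a secret in
// the host cluster.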
func createAPIServerCredentialsSecret(clientset client.Interface, namespace, credentialsName, federationName string, credentials *credentials, dryRun bool) (*api.Secret, error) {
// Build the secret object with API server credentials.
data := map[string][]byte{
"ca.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert),
"server.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.server.Cert),
"server.key": certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.server.Key),
}
if credentials.password != "" {
data["basicauth.csv"] = authFileContents(credentials.username, credentials.password)
}
if credentials.token != "" {
data["token.csv"] = authFileContents(credentials.username, credentials.token)
}
secret := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: credentialsName,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: data,
}
if dryRun {
return secret, nil
}
// Boilerplate to create the secret in the host cluster.
return clientset.Core().Secrets(namespace).Create(secret)
}
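// createControllerManagerKubeconfigSecret builds a kubeconfig carrying the
// controller manager's client certificate and stores it as a secret in the
// host cluster.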
func createControllerManagerKubeconfigSecret(clientset client.Interface, namespace, name, svcName, kubeconfigName string, entKeyPairs *entityKeyPairs, dryRun bool) (*api.Secret, error) {
config := kubeconfigutil.CreateWithCerts(
fmt.Sprintf("https://%s", svcName),
name,
ControllerManagerUser,
certutil.EncodeCertPEM(entKeyPairs.ca.Cert),
certutil.EncodePrivateKeyPEM(entKeyPairs.controllerManager.Key),
certutil.EncodeCertPEM(entKeyPairs.controllerManager.Cert),
)
return util.CreateKubeconfigSecret(clientset, config, namespace, kubeconfigName, name, "", dryRun)
}
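// createPVC creates the persistent volume claim that backs the etcd data of
// the federation API server.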
func createPVC(clientset client.Interface, namespace, svcName, federationName, etcdPVCapacity string, dryRun bool) (*api.PersistentVolumeClaim, error) {
capacity, err := resource.ParseQuantity(etcdPVCapacity)
if err != nil {
return nil, err
}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-etcd-claim", svcName),
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "yes",
federation.FederationNameAnnotation: federationName},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceStorage: capacity,
},
},
},
}
if dryRun {
return pvc, nil
}
return clientset.Core().PersistentVolumeClaims(namespace).Create(pvc)
}
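// createAPIServer creates the deployment that runs the federation API server
// alongside an etcd container, mounting the credentials secret and, if
// provided, the etcd data volume claim.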
func createAPIServer(clientset client.Interface, namespace, name, federationName, image, advertiseAddress, credentialsName string, hasHTTPBasicAuthFile, hasTokenAuthFile bool, argOverrides map[string]string, pvc *api.PersistentVolumeClaim, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-apiserver",
}
argsMap := map[string]string{
"--bind-address": "0.0.0.0",
"--etcd-servers": "http://localhost:2379",
"--secure-port": fmt.Sprintf("%d", apiServerSecurePort),
"--client-ca-file": "/etc/federation/apiserver/ca.crt",
"--tls-cert-file": "/etc/federation/apiserver/server.crt",
"--tls-private-key-file": "/etc/federation/apiserver/server.key",
"--admission-control": "NamespaceLifecycle",
}
if advertiseAddress != "" {
argsMap["--advertise-address"] = advertiseAddress
}
if hasHTTPBasicAuthFile {
argsMap["--basic-auth-file"] = "/etc/federation/apiserver/basicauth.csv"
}
if hasTokenAuthFile {
argsMap["--token-auth-file"] = "/etc/federation/apiserver/token.csv"
}
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: apiserverPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "apiserver",
Image: image,
Command: command,
Ports: []api.ContainerPort{
{
Name: apiServerSecurePortName,
ContainerPort: apiServerSecurePort,
},
{
Name: "local",
ContainerPort: 8080,
},
},
VolumeMounts: []api.VolumeMount{
{
Name: credentialsName,
MountPath: "/etc/federation/apiserver",
ReadOnly: true,
},
},
},
{
Name: "etcd",
Image: "gcr.io/google_containers/etcd:3.0.17",
Command: []string{
"/usr/local/bin/etcd",
"--data-dir",
"/var/etcd/data",
},
},
},
Volumes: []api.Volume{
{
Name: credentialsName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: credentialsName,
},
},
},
},
},
},
},
}
if pvc != nil {
dataVolumeName := "etcddata"
etcdVolume := api.Volume{
Name: dataVolumeName,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}
etcdVolumeMount := api.VolumeMount{
Name: dataVolumeName,
MountPath: "/var/etcd",
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, etcdVolume)
for i, container := range dep.Spec.Template.Spec.Containers {
if container.Name == "etcd" {
dep.Spec.Template.Spec.Containers[i].VolumeMounts = append(dep.Spec.Template.Spec.Containers[i].VolumeMounts, etcdVolumeMount)
}
}
}
if dryRun {
return dep, nil
}
createdDep, err := clientset.Extensions().Deployments(namespace).Create(dep)
return createdDep, err
}
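// createControllerManagerSA creates the service account used by the
// federation controller manager in the host cluster.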
func createControllerManagerSA(clientset client.Interface, namespace, federationName string, dryRun bool) (*api.ServiceAccount, error) {
sa := &api.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: ControllerManagerSA,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return sa, nil
}
return clientset.Core().ServiceAccounts(namespace).Create(sa)
}
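// createRoleBindings creates an RBAC role that allows reading secrets in the
// federation system namespace and binds it to the controller manager's
// service account.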
func createRoleBindings(clientset client.Interface, namespace, saName, federationName string, dryRun bool) (*rbac.Role, *rbac.RoleBinding, error) {
roleName := "federation-system:federation-controller-manager"
role := &rbac.Role{
// a role to use for bootstrapping the federation-controller-manager so it can access
// secrets in the host cluster to access other clusters.
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch").Groups(legacyAPIGroup).Resources("secrets").RuleOrDie(),
},
}
rolebinding, err := rbac.NewRoleBinding(roleName, namespace).SAs(namespace, saName).Binding()
if err != nil {
return nil, nil, err
}
rolebinding.Labels = componentLabel
rolebinding.Annotations = map[string]string{federation.FederationNameAnnotation: federationName}
if dryRun {
return role, &rolebinding, nil
}
newRole, err := clientset.Rbac().Roles(namespace).Create(role)
if err != nil {
return nil, nil, err
}
newRolebinding, err := clientset.Rbac().RoleBindings(namespace).Create(&rolebinding)
return newRole, newRolebinding, err
}
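// createControllerManager creates the deployment that runs the federation
// controller manager, wired to the federation API server through the
// generated kubeconfig secret.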
func createControllerManager(clientset client.Interface, namespace, name, svcName, cmName, image, kubeconfigName, dnsZoneName, dnsProvider, dnsProviderConfig, saName string, dnsProviderSecret *api.Secret, argOverrides map[string]string, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-controller-manager",
}
argsMap := map[string]string{
"--kubeconfig": "/etc/federation/controller-manager/kubeconfig",
}
argsMap["--master"] = fmt.Sprintf("https://%s", svcName)
argsMap["--dns-provider"] = dnsProvider
argsMap["--federation-name"] = name
argsMap["--zone-name"] = dnsZoneName
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Namespace: namespace,
Labels: componentLabel,
// We additionally update the details (in annotations) about the
// kube-dns config map which needs to be created in the clusters
// registering to this federation (at kubefed join).
			// We won't otherwise have this information available at kubefed join.
Annotations: map[string]string{
// TODO: the name/domain name pair should ideally be checked for naming convention
// as done in kube-dns federation flags check.
// https://github.com/kubernetes/dns/blob/master/pkg/dns/federation/federation.go
// TODO v2: Until kube-dns can handle trailing periods we strip them all.
// See https://github.com/kubernetes/dns/issues/67
util.FedDomainMapKey: fmt.Sprintf("%s=%s", name, strings.TrimRight(dnsZoneName, ".")),
federation.FederationNameAnnotation: name,
},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Labels: controllerManagerPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "controller-manager",
Image: image,
Command: command,
VolumeMounts: []api.VolumeMount{
{
Name: kubeconfigName,
MountPath: "/etc/federation/controller-manager",
ReadOnly: true,
},
},
Env: []api.EnvVar{
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
},
},
Volumes: []api.Volume{
{
Name: kubeconfigName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: kubeconfigName,
},
},
},
},
},
},
},
}
if saName != "" {
dep.Spec.Template.Spec.ServiceAccountName = saName
}
if dnsProviderSecret != nil {
dep = addDNSProviderConfig(dep, dnsProviderSecret.Name)
if dnsProvider == util.FedDNSProviderCoreDNS {
var err error
dep, err = addCoreDNSServerAnnotation(dep, dnsZoneName, dnsProviderConfig)
if err != nil {
return nil, err
}
}
}
if dryRun {
return dep, nil
}
return clientset.Extensions().Deployments(namespace).Create(dep)
}
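// marshallOverrides parses a comma-separated list of "key=value" override
// arguments, for example "--arg1=value1,--arg2=value2", into a map.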
func marshallOverrides(overrideArgString string) (map[string]string, error) {
if overrideArgString == "" {
return nil, nil
}
argsMap := make(map[string]string)
overrideArgs := strings.Split(overrideArgString, ",")
for _, overrideArg := range overrideArgs {
splitArg := strings.SplitN(overrideArg, "=", 2)
if len(splitArg) != 2 {
return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg)
}
key := strings.TrimSpace(splitArg[0])
val := strings.TrimSpace(splitArg[1])
if len(key) == 0 {
return nil, fmt.Errorf("wrong format for override arg: %s, arg name cannot be empty", overrideArg)
}
argsMap[key] = val
}
return argsMap, nil
}
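// argMapsToArgStrings applies overrides on top of argsMap and renders the
// result as a sorted slice of "key=value" strings.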
func argMapsToArgStrings(argsMap, overrides map[string]string) []string {
for key, val := range overrides {
argsMap[key] = val
}
args := []string{}
for key, value := range argsMap {
args = append(args, fmt.Sprintf("%s=%s", key, value))
}
// This is needed for the unit test deep copy to get an exact match
sort.Strings(args)
return args
}
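// waitForPods polls until every federation control plane pod named in fedPods
// has reached the Running phase.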
func waitForPods(cmdOut io.Writer, clientset client.Interface, fedPods []string, namespace string) error {
err := wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
podCheck := len(fedPods)
podList, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return false, nil
}
for _, pod := range podList.Items {
for _, fedPod := range fedPods {
if strings.HasPrefix(pod.Name, fedPod) && pod.Status.Phase == "Running" {
					podCheck--
				}
			}
			// Ensure that all pods are in the Running state, or keep waiting.
if podCheck == 0 {
return true, nil
}
}
return false, nil
})
return err
}
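// waitSrvHealthy polls the federation API server's /healthz endpoint until it
// reports ok.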
func waitSrvHealthy(cmdOut io.Writer, config util.AdminConfig, context, kubeconfig string) error {
fedClientSet, err := config.FederationClientset(context, kubeconfig)
if err != nil {
return err
}
fedDiscoveryClient := fedClientSet.Discovery()
err = wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
body, err := fedDiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err != nil {
return false, nil
}
if strings.EqualFold(string(body), "ok") {
return true, nil
}
return false, nil
})
return err
}
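// printSuccess reports the endpoints at which the federation API server is
// reachable, appending the node port when the service is of type NodePort.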
func printSuccess(cmdOut io.Writer, ips, hostnames []string, svc *api.Service) error {
svcEndpoints := append(ips, hostnames...)
endpoints := strings.Join(svcEndpoints, ", ")
if svc.Spec.Type == api.ServiceTypeNodePort {
endpoints = ips[0] + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
if len(ips) > 1 {
endpoints = endpoints + ", ..."
}
}
_, err := fmt.Fprintf(cmdOut, "Federation API server is running at: %s\n", endpoints)
return err
}
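// updateKubeconfig records the federation API server endpoint, the admin
// credentials, and a matching context in the user's kubeconfig file.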
func updateKubeconfig(config util.AdminConfig, name, endpoint, kubeConfigPath string, credentials *credentials, dryRun bool) error {
po := config.PathOptions()
po.LoadingRules.ExplicitPath = kubeConfigPath
kubeconfig, err := po.GetStartingConfig()
if err != nil {
return err
}
// Populate API server endpoint info.
cluster := clientcmdapi.NewCluster()
// Prefix "https" as the URL scheme to endpoint.
if !strings.HasPrefix(endpoint, "https://") {
endpoint = fmt.Sprintf("https://%s", endpoint)
}
cluster.Server = endpoint
cluster.CertificateAuthorityData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert)
// Populate credentials.
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.admin.Cert)
authInfo.ClientKeyData = certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.admin.Key)
authInfo.Token = credentials.token
var httpBasicAuthInfo *clientcmdapi.AuthInfo
if credentials.password != "" {
httpBasicAuthInfo = clientcmdapi.NewAuthInfo()
httpBasicAuthInfo.Password = credentials.password
httpBasicAuthInfo.Username = credentials.username
}
// Populate context.
context := clientcmdapi.NewContext()
context.Cluster = name
context.AuthInfo = name
// Update the config struct with API server endpoint info,
// credentials and context.
kubeconfig.Clusters[name] = cluster
kubeconfig.AuthInfos[name] = authInfo
if httpBasicAuthInfo != nil {
kubeconfig.AuthInfos[fmt.Sprintf("%s-basic-auth", name)] = httpBasicAuthInfo
}
kubeconfig.Contexts[name] = context
if !dryRun {
// Write the update kubeconfig.
if err := clientcmd.ModifyConfig(po, *kubeconfig, true); err != nil {
return err
}
}
return nil
}
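// createDNSProviderConfigSecret stores the DNS provider configuration in a
// secret in the host cluster; it is a no-op when no configuration was
// provided.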
func createDNSProviderConfigSecret(clientset client.Interface, namespace, name, federationName string, dnsProviderConfigBytes []byte, dryRun bool) (*api.Secret, error) {
if dnsProviderConfigBytes == nil {
return nil, nil
}
secretSpec := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: map[string][]byte{
name: dnsProviderConfigBytes,
},
}
var secret *api.Secret
var err error
if !dryRun {
secret, err = clientset.Core().Secrets(namespace).Create(secretSpec)
if err != nil {
return nil, err
}
}
return secret, nil
}
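// addDNSProviderConfig mounts the DNS provider config secret into the
// controller manager container and points the --dns-provider-config flag at
// it.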
func addDNSProviderConfig(dep *extensions.Deployment, secretName string) *extensions.Deployment {
const (
dnsProviderConfigVolume = "config-volume"
dnsProviderConfigMountPath = "/etc/federation/dns-provider"
)
// Create a volume from dns-provider secret
volume := api.Volume{
Name: dnsProviderConfigVolume,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: secretName,
},
},
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, volume)
// Mount dns-provider secret volume to controller-manager container
volumeMount := api.VolumeMount{
Name: dnsProviderConfigVolume,
MountPath: dnsProviderConfigMountPath,
ReadOnly: true,
}
dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(dep.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
dep.Spec.Template.Spec.Containers[0].Command = append(dep.Spec.Template.Spec.Containers[0].Command, fmt.Sprintf("--dns-provider-config=%s/%s", dnsProviderConfigMountPath, secretName))
return dep
}
// authFileContents returns a CSV string containing the contents of an
// authentication file in the format required by the federation-apiserver.
func authFileContents(username, authSecret string) []byte {
return []byte(fmt.Sprintf("%s,%s,%s\n", authSecret, username, uuid.NewUUID()))
}
func addCoreDNSServerAnnotation(deployment *extensions.Deployment, dnsZoneName, dnsProviderConfig string) (*extensions.Deployment, error) {
var cfg coredns.Config
if err := gcfg.ReadFileInto(&cfg, dnsProviderConfig); err != nil {
return nil, err
}
deployment.Annotations[util.FedDNSZoneName] = dnsZoneName
deployment.Annotations[util.FedNameServer] = cfg.Global.CoreDNSEndpoints
deployment.Annotations[util.FedDNSProvider] = util.FedDNSProviderCoreDNS
return deployment, nil
}
| federation/pkg/kubefed/init/init.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.010483183898031712,
0.0004978235810995102,
0.00016364733164664358,
0.0001732188684400171,
0.001445265719667077
] |
{
"id": 0,
"code_window": [
" \"//federation/pkg/kubefed:go_default_library\",\n",
" \"//pkg/client/metrics/prometheus:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/util/logs:go_default_library\",\n",
" \"//pkg/version/prometheus:go_default_library\",\n",
" ],\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//pkg/version:go_default_library\",\n"
],
"file_path": "federation/cmd/kubefed/app/BUILD",
"type": "add",
"edit_start_line_idx": 18
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"bidi.go",
"bracket.go",
"core.go",
"prop.go",
"tables.go",
"trieval.go",
],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| vendor/golang.org/x/text/unicode/bidi/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0013884683139622211,
0.0004767565696965903,
0.00016898837930057198,
0.00017478482914157212,
0.0005263902712613344
] |
{
"id": 0,
"code_window": [
" \"//federation/pkg/kubefed:go_default_library\",\n",
" \"//pkg/client/metrics/prometheus:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/util/logs:go_default_library\",\n",
" \"//pkg/version/prometheus:go_default_library\",\n",
" ],\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//pkg/version:go_default_library\",\n"
],
"file_path": "federation/cmd/kubefed/app/BUILD",
"type": "add",
"edit_start_line_idx": 18
} | services:
kubernetes:
charm: __CHARM_DIR__/builds/kubernetes
annotations:
"gui-x": "600"
"gui-y": "0"
expose: true
num_units: 2
etcd:
charm: cs:~containers/etcd
annotations:
"gui-x": "300"
"gui-y": "0"
num_units: 1
relations:
- - "kubernetes:etcd"
- "etcd:db"
series: xenial
| cluster/juju/bundles/local.yaml.base | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017619474965613335,
0.00017509335884824395,
0.00017399198259226978,
0.00017509335884824395,
0.0000011013835319317877
] |
{
"id": 0,
"code_window": [
" \"//federation/pkg/kubefed:go_default_library\",\n",
" \"//pkg/client/metrics/prometheus:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/util/logs:go_default_library\",\n",
" \"//pkg/version/prometheus:go_default_library\",\n",
" ],\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
" \"//pkg/version:go_default_library\",\n"
],
"file_path": "federation/cmd/kubefed/app/BUILD",
"type": "add",
"edit_start_line_idx": 18
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"error.go",
"lexer.go",
],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| vendor/github.com/mailru/easyjson/jlexer/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.005298420786857605,
0.0014564909506589174,
0.00017411232693120837,
0.00017671543173491955,
0.0022181400563567877
] |
{
"id": 1,
"code_window": [
"\n",
"package app\n",
"\n",
"import (\n",
"\t\"os\"\n",
"\n",
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 19
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubefed
import (
"io"
"k8s.io/apiserver/pkg/util/flag"
"k8s.io/client-go/tools/clientcmd"
kubefedinit "k8s.io/kubernetes/federation/pkg/kubefed/init"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
kubectl "k8s.io/kubernetes/pkg/kubectl/cmd"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/spf13/cobra"
)
// NewKubeFedCommand creates the `kubefed` command and its nested children.
func NewKubeFedCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cobra.Command {
// Parent command to which all subcommands are added.
cmds := &cobra.Command{
Use: "kubefed",
Short: "kubefed controls a Kubernetes Cluster Federation",
Long: templates.LongDesc(`
kubefed controls a Kubernetes Cluster Federation.
Find more information at https://github.com/kubernetes/kubernetes.`),
Run: runHelp,
}
f.BindFlags(cmds.PersistentFlags())
f.BindExternalFlags(cmds.PersistentFlags())
	// From this point forward we get warnings on flags that contain "_" separators
cmds.SetGlobalNormalizationFunc(flag.WarnWordSepNormalizeFunc)
groups := templates.CommandGroups{
{
Message: "Basic Commands:",
Commands: []*cobra.Command{
kubefedinit.NewCmdInit(out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdJoin(f, out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdUnjoin(f, out, err, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
},
},
}
groups.Add(cmds)
filters := []string{
"options",
}
templates.ActsAsRootCommand(cmds, filters, groups...)
cmds.AddCommand(kubectl.NewCmdVersion(f, out))
cmds.AddCommand(kubectl.NewCmdOptions())
return cmds
}
func runHelp(cmd *cobra.Command, args []string) {
cmd.Help()
}
| federation/pkg/kubefed/kubefed.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0048201559111475945,
0.001359635847620666,
0.00016586329729761928,
0.00021462429140228778,
0.0019898219034075737
] |
{
"id": 1,
"code_window": [
"\n",
"package app\n",
"\n",
"import (\n",
"\t\"os\"\n",
"\n",
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 19
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by lister-gen
package v1
import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/kubernetes/pkg/api/v1"
)
// EventLister helps list Events.
type EventLister interface {
// List lists all Events in the indexer.
List(selector labels.Selector) (ret []*v1.Event, err error)
// Events returns an object that can list and get Events.
Events(namespace string) EventNamespaceLister
EventListerExpansion
}
// eventLister implements the EventLister interface.
type eventLister struct {
indexer cache.Indexer
}
// NewEventLister returns a new EventLister.
func NewEventLister(indexer cache.Indexer) EventLister {
return &eventLister{indexer: indexer}
}
// List lists all Events in the indexer.
func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.Event))
})
return ret, err
}
// Events returns an object that can list and get Events.
func (s *eventLister) Events(namespace string) EventNamespaceLister {
return eventNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// EventNamespaceLister helps list and get Events.
type EventNamespaceLister interface {
// List lists all Events in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1.Event, err error)
// Get retrieves the Event from the indexer for a given namespace and name.
Get(name string) (*v1.Event, error)
EventNamespaceListerExpansion
}
// eventNamespaceLister implements the EventNamespaceLister
// interface.
type eventNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all Events in the indexer for a given namespace.
func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1.Event))
})
return ret, err
}
// Get retrieves the Event from the indexer for a given namespace and name.
func (s eventNamespaceLister) Get(name string) (*v1.Event, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("event"), name)
}
return obj.(*v1.Event), nil
}
| pkg/client/listers/core/v1/event.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0034785529132932425,
0.0004998247604817152,
0.00016544770915061235,
0.0001685129973338917,
0.0009929132647812366
] |
{
"id": 1,
"code_window": [
"\n",
"package app\n",
"\n",
"import (\n",
"\t\"os\"\n",
"\n",
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 19
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"apps_client.go",
"deployment.go",
"doc.go",
"generated_expansion.go",
"scale.go",
"statefulset.go",
],
tags = ["automanaged"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/pkg/apis/apps/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
)
| staging/src/k8s.io/client-go/kubernetes/typed/apps/v1beta1/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.050223737955093384, 0.012683426961302757, 0.00016761438746470958, 0.00017117755487561226, 0.021673906594514847] |
{
"id": 1,
"code_window": [
"\n",
"package app\n",
"\n",
"import (\n",
"\t\"os\"\n",
"\n",
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"fmt\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 19
}

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rkt
import (
"fmt"
"github.com/google/cadvisor/container"
"github.com/google/cadvisor/container/libcontainer"
"github.com/google/cadvisor/fs"
info "github.com/google/cadvisor/info/v1"
"github.com/google/cadvisor/manager/watcher"
"github.com/golang/glog"
)
const RktNamespace = "rkt"
type rktFactory struct {
machineInfoFactory info.MachineInfoFactory
cgroupSubsystems *libcontainer.CgroupSubsystems
fsInfo fs.FsInfo
ignoreMetrics container.MetricSet
rktPath string
}
func (self *rktFactory) String() string {
return "rkt"
}
func (self *rktFactory) NewContainerHandler(name string, inHostNamespace bool) (container.ContainerHandler, error) {
client, err := Client()
if err != nil {
return nil, err
}
rootFs := "/"
if !inHostNamespace {
rootFs = "/rootfs"
}
return newRktContainerHandler(name, client, self.rktPath, self.cgroupSubsystems, self.machineInfoFactory, self.fsInfo, rootFs, self.ignoreMetrics)
}
func (self *rktFactory) CanHandleAndAccept(name string) (bool, bool, error) {
accept, err := verifyPod(name)
return accept, accept, err
}
func (self *rktFactory) DebugInfo() map[string][]string {
return map[string][]string{}
}
func Register(machineInfoFactory info.MachineInfoFactory, fsInfo fs.FsInfo, ignoreMetrics container.MetricSet) error {
_, err := Client()
if err != nil {
return fmt.Errorf("unable to communicate with Rkt api service: %v", err)
}
rktPath, err := RktPath()
if err != nil {
return fmt.Errorf("unable to get the RktPath variable %v", err)
}
cgroupSubsystems, err := libcontainer.GetCgroupSubsystems()
if err != nil {
return fmt.Errorf("failed to get cgroup subsystems: %v", err)
}
if len(cgroupSubsystems.Mounts) == 0 {
return fmt.Errorf("failed to find supported cgroup mounts for the raw factory")
}
glog.Infof("Registering Rkt factory")
factory := &rktFactory{
machineInfoFactory: machineInfoFactory,
fsInfo: fsInfo,
cgroupSubsystems: &cgroupSubsystems,
ignoreMetrics: ignoreMetrics,
rktPath: rktPath,
}
container.RegisterContainerHandlerFactory(factory, []watcher.ContainerWatchSource{watcher.Rkt})
return nil
}
| vendor/github.com/google/cadvisor/container/rkt/factory.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.00025819489383138716, 0.0001842212222982198, 0.00016693126235622913, 0.00017004235996864736, 0.000029693368560401723] |
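
A short sketch of how the Register entry point above might be called during cAdvisor startup. The MachineInfoFactory and FsInfo values are assumed to come from the surrounding manager, and the ignored metric is chosen arbitrarily for illustration:

package main

import (
	"github.com/google/cadvisor/container"
	"github.com/google/cadvisor/container/rkt"
	"github.com/google/cadvisor/fs"
	info "github.com/google/cadvisor/info/v1"
)

// registerRktFactory wires the rkt container handler factory into cAdvisor.
func registerRktFactory(mif info.MachineInfoFactory, fsInfo fs.FsInfo) error {
	ignore := container.MetricSet{}
	ignore.Add(container.DiskUsageMetrics) // skip per-container disk usage metrics
	return rkt.Register(mif, fsInfo, ignore)
}
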
{
"id": 2,
"code_window": [
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/util/logs\"\n",
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/version\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 25
}

/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubefed
import (
"io"
"k8s.io/apiserver/pkg/util/flag"
"k8s.io/client-go/tools/clientcmd"
kubefedinit "k8s.io/kubernetes/federation/pkg/kubefed/init"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
kubectl "k8s.io/kubernetes/pkg/kubectl/cmd"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/spf13/cobra"
)
// NewKubeFedCommand creates the `kubefed` command and its nested children.
func NewKubeFedCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cobra.Command {
// Parent command to which all subcommands are added.
cmds := &cobra.Command{
Use: "kubefed",
Short: "kubefed controls a Kubernetes Cluster Federation",
Long: templates.LongDesc(`
kubefed controls a Kubernetes Cluster Federation.
Find more information at https://github.com/kubernetes/kubernetes.`),
Run: runHelp,
}
f.BindFlags(cmds.PersistentFlags())
f.BindExternalFlags(cmds.PersistentFlags())
// From this point and forward we get warnings on flags that contain "_" separators
cmds.SetGlobalNormalizationFunc(flag.WarnWordSepNormalizeFunc)
groups := templates.CommandGroups{
{
Message: "Basic Commands:",
Commands: []*cobra.Command{
kubefedinit.NewCmdInit(out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdJoin(f, out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdUnjoin(f, out, err, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
},
},
}
groups.Add(cmds)
filters := []string{
"options",
}
templates.ActsAsRootCommand(cmds, filters, groups...)
cmds.AddCommand(kubectl.NewCmdVersion(f, out))
cmds.AddCommand(kubectl.NewCmdOptions())
return cmds
}
func runHelp(cmd *cobra.Command, args []string) {
cmd.Help()
}
| federation/pkg/kubefed/kubefed.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.021099403500556946, 0.004039118066430092, 0.00016574820620007813, 0.000186869889148511, 0.007196234539151192] |
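
The hunks above edit imports in federation/cmd/kubefed/app/kubefed.go; the following is a plausible sketch of that entry point, showing how NewKubeFedCommand is wired up. The factory construction and log setup are assumptions based on the imports listed in the hunks, not the actual file contents:

package app

import (
	"os"

	"k8s.io/kubernetes/federation/pkg/kubefed"
	cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"k8s.io/kubernetes/pkg/util/logs"
)

// Run builds the kubefed root command and executes it against the standard streams.
func Run() error {
	logs.InitLogs()
	defer logs.FlushLogs()
	cmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
	return cmd.Execute()
}
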
{
"id": 2,
"code_window": [
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/util/logs\"\n",
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/version\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 25
}

/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubeadm
import (
"bytes"
"fmt"
"os/exec"
)
// Forked from test/e2e/framework because the e2e framework is quite bloated
// for our purposes here, and modified to remove undesired logging.
func RunCmd(command string, args ...string) (string, string, error) {
var bout, berr bytes.Buffer
cmd := exec.Command(command, args...)
cmd.Stdout = &bout
cmd.Stderr = &berr
err := cmd.Run()
stdout, stderr := bout.String(), berr.String()
if err != nil {
return "", "", fmt.Errorf("error running %s %v; \ngot error %v, \nstdout %q, \nstderr %q",
command, args, err, stdout, stderr)
}
return stdout, stderr, nil
}
| cmd/kubeadm/test/cmd/util.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.00020764062355738133, 0.00018170964904129505, 0.0001704345631878823, 0.00017438172653783113, 0.000015059197721711826] |
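
A tiny usage sketch for the RunCmd helper above; the binary name and arguments are illustrative only:

package main

import (
	"fmt"

	kubeadm "k8s.io/kubernetes/cmd/kubeadm/test/cmd"
)

// printKubeadmVersion shells out via RunCmd and prints whatever the binary wrote to stdout.
func printKubeadmVersion() error {
	stdout, _, err := kubeadm.RunCmd("kubeadm", "version")
	if err != nil {
		return err
	}
	fmt.Print(stdout)
	return nil
}
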
{
"id": 2,
"code_window": [
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/util/logs\"\n",
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/version\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 25
}

// Code generated by protoc-gen-go.
// source: api.proto
// DO NOT EDIT!
/*
Package v1alpha is a generated protocol buffer package.
It is generated from these files:
api.proto
It has these top-level messages:
ImageFormat
Image
Network
App
Pod
KeyValue
PodFilter
ImageFilter
GlobalFlags
Info
Event
EventFilter
GetInfoRequest
GetInfoResponse
ListPodsRequest
ListPodsResponse
InspectPodRequest
InspectPodResponse
ListImagesRequest
ListImagesResponse
InspectImageRequest
InspectImageResponse
ListenEventsRequest
ListenEventsResponse
GetLogsRequest
GetLogsResponse
*/
package v1alpha
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// ImageType defines the supported image type.
type ImageType int32
const (
ImageType_IMAGE_TYPE_UNDEFINED ImageType = 0
ImageType_IMAGE_TYPE_APPC ImageType = 1
ImageType_IMAGE_TYPE_DOCKER ImageType = 2
ImageType_IMAGE_TYPE_OCI ImageType = 3
)
var ImageType_name = map[int32]string{
0: "IMAGE_TYPE_UNDEFINED",
1: "IMAGE_TYPE_APPC",
2: "IMAGE_TYPE_DOCKER",
3: "IMAGE_TYPE_OCI",
}
var ImageType_value = map[string]int32{
"IMAGE_TYPE_UNDEFINED": 0,
"IMAGE_TYPE_APPC": 1,
"IMAGE_TYPE_DOCKER": 2,
"IMAGE_TYPE_OCI": 3,
}
func (x ImageType) String() string {
return proto.EnumName(ImageType_name, int32(x))
}
func (ImageType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
// AppState defines the possible states of the app.
type AppState int32
const (
AppState_APP_STATE_UNDEFINED AppState = 0
AppState_APP_STATE_RUNNING AppState = 1
AppState_APP_STATE_EXITED AppState = 2
)
var AppState_name = map[int32]string{
0: "APP_STATE_UNDEFINED",
1: "APP_STATE_RUNNING",
2: "APP_STATE_EXITED",
}
var AppState_value = map[string]int32{
"APP_STATE_UNDEFINED": 0,
"APP_STATE_RUNNING": 1,
"APP_STATE_EXITED": 2,
}
func (x AppState) String() string {
return proto.EnumName(AppState_name, int32(x))
}
func (AppState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
// PodState defines the possible states of the pod.
// See https://github.com/coreos/rkt/blob/master/Documentation/devel/pod-lifecycle.md for a detailed
// explanation of each state.
type PodState int32
const (
PodState_POD_STATE_UNDEFINED PodState = 0
// States before the pod is running.
PodState_POD_STATE_EMBRYO PodState = 1
PodState_POD_STATE_PREPARING PodState = 2
PodState_POD_STATE_PREPARED PodState = 3
// State that indicates the pod is running.
PodState_POD_STATE_RUNNING PodState = 4
// States that indicates the pod is exited, and will never run.
PodState_POD_STATE_ABORTED_PREPARE PodState = 5
PodState_POD_STATE_EXITED PodState = 6
PodState_POD_STATE_DELETING PodState = 7
PodState_POD_STATE_GARBAGE PodState = 8
)
var PodState_name = map[int32]string{
0: "POD_STATE_UNDEFINED",
1: "POD_STATE_EMBRYO",
2: "POD_STATE_PREPARING",
3: "POD_STATE_PREPARED",
4: "POD_STATE_RUNNING",
5: "POD_STATE_ABORTED_PREPARE",
6: "POD_STATE_EXITED",
7: "POD_STATE_DELETING",
8: "POD_STATE_GARBAGE",
}
var PodState_value = map[string]int32{
"POD_STATE_UNDEFINED": 0,
"POD_STATE_EMBRYO": 1,
"POD_STATE_PREPARING": 2,
"POD_STATE_PREPARED": 3,
"POD_STATE_RUNNING": 4,
"POD_STATE_ABORTED_PREPARE": 5,
"POD_STATE_EXITED": 6,
"POD_STATE_DELETING": 7,
"POD_STATE_GARBAGE": 8,
}
func (x PodState) String() string {
return proto.EnumName(PodState_name, int32(x))
}
func (PodState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
// EventType defines the type of the events that will be received via ListenEvents().
type EventType int32
const (
EventType_EVENT_TYPE_UNDEFINED EventType = 0
// Pod events.
EventType_EVENT_TYPE_POD_PREPARED EventType = 1
EventType_EVENT_TYPE_POD_PREPARE_ABORTED EventType = 2
EventType_EVENT_TYPE_POD_STARTED EventType = 3
EventType_EVENT_TYPE_POD_EXITED EventType = 4
EventType_EVENT_TYPE_POD_GARBAGE_COLLECTED EventType = 5
// App events.
EventType_EVENT_TYPE_APP_STARTED EventType = 6
EventType_EVENT_TYPE_APP_EXITED EventType = 7
// Image events.
EventType_EVENT_TYPE_IMAGE_IMPORTED EventType = 8
EventType_EVENT_TYPE_IMAGE_REMOVED EventType = 9
)
var EventType_name = map[int32]string{
0: "EVENT_TYPE_UNDEFINED",
1: "EVENT_TYPE_POD_PREPARED",
2: "EVENT_TYPE_POD_PREPARE_ABORTED",
3: "EVENT_TYPE_POD_STARTED",
4: "EVENT_TYPE_POD_EXITED",
5: "EVENT_TYPE_POD_GARBAGE_COLLECTED",
6: "EVENT_TYPE_APP_STARTED",
7: "EVENT_TYPE_APP_EXITED",
8: "EVENT_TYPE_IMAGE_IMPORTED",
9: "EVENT_TYPE_IMAGE_REMOVED",
}
var EventType_value = map[string]int32{
"EVENT_TYPE_UNDEFINED": 0,
"EVENT_TYPE_POD_PREPARED": 1,
"EVENT_TYPE_POD_PREPARE_ABORTED": 2,
"EVENT_TYPE_POD_STARTED": 3,
"EVENT_TYPE_POD_EXITED": 4,
"EVENT_TYPE_POD_GARBAGE_COLLECTED": 5,
"EVENT_TYPE_APP_STARTED": 6,
"EVENT_TYPE_APP_EXITED": 7,
"EVENT_TYPE_IMAGE_IMPORTED": 8,
"EVENT_TYPE_IMAGE_REMOVED": 9,
}
func (x EventType) String() string {
return proto.EnumName(EventType_name, int32(x))
}
func (EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
// ImageFormat defines the format of the image.
type ImageFormat struct {
// Type of the image, required.
Type ImageType `protobuf:"varint,1,opt,name=type,enum=v1alpha.ImageType" json:"type,omitempty"`
// Version of the image format, required.
Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
}
func (m *ImageFormat) Reset() { *m = ImageFormat{} }
func (m *ImageFormat) String() string { return proto.CompactTextString(m) }
func (*ImageFormat) ProtoMessage() {}
func (*ImageFormat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *ImageFormat) GetType() ImageType {
if m != nil {
return m.Type
}
return ImageType_IMAGE_TYPE_UNDEFINED
}
func (m *ImageFormat) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
// Image describes the image's information.
type Image struct {
// Base format of the image, required. This indicates the original format
// for the image as nowadays all the image formats will be transformed to
// ACI.
BaseFormat *ImageFormat `protobuf:"bytes,1,opt,name=base_format,json=baseFormat" json:"base_format,omitempty"`
// ID of the image, a string that can be used to uniquely identify the image,
// e.g. sha512 hash of the ACIs, required.
Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
// Name of the image in the image manifest, e.g. 'coreos.com/etcd', optional.
Name string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
// Version of the image, e.g. 'latest', '2.0.10', optional.
Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"`
// Timestamp of when the image is imported, it is the seconds since epoch, optional.
ImportTimestamp int64 `protobuf:"varint,5,opt,name=import_timestamp,json=importTimestamp" json:"import_timestamp,omitempty"`
// JSON-encoded byte array that represents the image manifest, optional.
Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"`
// Size is the size in bytes of this image in the store.
Size int64 `protobuf:"varint,7,opt,name=size" json:"size,omitempty"`
// Annotations on this image.
Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"`
// Labels of this image.
Labels []*KeyValue `protobuf:"bytes,9,rep,name=labels" json:"labels,omitempty"`
}
func (m *Image) Reset() { *m = Image{} }
func (m *Image) String() string { return proto.CompactTextString(m) }
func (*Image) ProtoMessage() {}
func (*Image) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Image) GetBaseFormat() *ImageFormat {
if m != nil {
return m.BaseFormat
}
return nil
}
func (m *Image) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Image) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Image) GetVersion() string {
if m != nil {
return m.Version
}
return ""
}
func (m *Image) GetImportTimestamp() int64 {
if m != nil {
return m.ImportTimestamp
}
return 0
}
func (m *Image) GetManifest() []byte {
if m != nil {
return m.Manifest
}
return nil
}
func (m *Image) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *Image) GetAnnotations() []*KeyValue {
if m != nil {
return m.Annotations
}
return nil
}
func (m *Image) GetLabels() []*KeyValue {
if m != nil {
return m.Labels
}
return nil
}
// Network describes the network information of a pod.
type Network struct {
// Name of the network that a pod belongs to, required.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Pod's IPv4 address within the network, optional if IPv6 address is given.
Ipv4 string `protobuf:"bytes,2,opt,name=ipv4" json:"ipv4,omitempty"`
// Pod's IPv6 address within the network, optional if IPv4 address is given.
Ipv6 string `protobuf:"bytes,3,opt,name=ipv6" json:"ipv6,omitempty"`
}
func (m *Network) Reset() { *m = Network{} }
func (m *Network) String() string { return proto.CompactTextString(m) }
func (*Network) ProtoMessage() {}
func (*Network) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Network) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Network) GetIpv4() string {
if m != nil {
return m.Ipv4
}
return ""
}
func (m *Network) GetIpv6() string {
if m != nil {
return m.Ipv6
}
return ""
}
// App describes the information of an app that's running in a pod.
type App struct {
// Name of the app, required.
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Image used by the app, required. However, this may only contain the image id
// if it is returned by ListPods().
Image *Image `protobuf:"bytes,2,opt,name=image" json:"image,omitempty"`
// State of the app. optional, non-empty only if it's returned by InspectPod().
State AppState `protobuf:"varint,3,opt,name=state,enum=v1alpha.AppState" json:"state,omitempty"`
// Exit code of the app. optional, only valid if it's returned by InspectPod() and
// the app has already exited.
ExitCode int32 `protobuf:"zigzag32,4,opt,name=exit_code,json=exitCode" json:"exit_code,omitempty"`
// Annotations for this app.
Annotations []*KeyValue `protobuf:"bytes,5,rep,name=annotations" json:"annotations,omitempty"`
}
func (m *App) Reset() { *m = App{} }
func (m *App) String() string { return proto.CompactTextString(m) }
func (*App) ProtoMessage() {}
func (*App) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *App) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *App) GetImage() *Image {
if m != nil {
return m.Image
}
return nil
}
func (m *App) GetState() AppState {
if m != nil {
return m.State
}
return AppState_APP_STATE_UNDEFINED
}
func (m *App) GetExitCode() int32 {
if m != nil {
return m.ExitCode
}
return 0
}
func (m *App) GetAnnotations() []*KeyValue {
if m != nil {
return m.Annotations
}
return nil
}
// Pod describes a pod's information.
// If a pod is in Embryo, Preparing, AbortedPrepare state,
// only id and state will be returned.
//
// If a pod is in other states, the pod manifest and
// apps will be returned when 'detailed' is true in the request.
//
// A valid pid of the stage1 process of the pod will be returned
// if the pod is Running or has run once.
//
// Networks are only returned when a pod is in Running.
type Pod struct {
// ID of the pod, in the form of a UUID.
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
// PID of the stage1 process of the pod.
Pid int32 `protobuf:"zigzag32,2,opt,name=pid" json:"pid,omitempty"`
// State of the pod.
State PodState `protobuf:"varint,3,opt,name=state,enum=v1alpha.PodState" json:"state,omitempty"`
// List of apps in the pod.
Apps []*App `protobuf:"bytes,4,rep,name=apps" json:"apps,omitempty"`
// Network information of the pod.
// Note that a pod can be in multiple networks.
Networks []*Network `protobuf:"bytes,5,rep,name=networks" json:"networks,omitempty"`
// JSON-encoded byte array that represents the pod manifest of the pod.
Manifest []byte `protobuf:"bytes,6,opt,name=manifest,proto3" json:"manifest,omitempty"`
// Annotations on this pod.
Annotations []*KeyValue `protobuf:"bytes,7,rep,name=annotations" json:"annotations,omitempty"`
// Cgroup of the pod, empty if the pod is not running.
Cgroup string `protobuf:"bytes,8,opt,name=cgroup" json:"cgroup,omitempty"`
// Timestamp of when the pod is created, nanoseconds since epoch.
// Zero if the pod is not created.
CreatedAt int64 `protobuf:"varint,9,opt,name=created_at,json=createdAt" json:"created_at,omitempty"`
// Timestamp of when the pod is started, nanoseconds since epoch.
// Zero if the pod is not started.
StartedAt int64 `protobuf:"varint,10,opt,name=started_at,json=startedAt" json:"started_at,omitempty"`
// Timestamp of when the pod is moved to exited-garbage/garbage,
// in nanoseconds since epoch.
// Zero if the pod is not moved to exited-garbage/garbage yet.
GcMarkedAt int64 `protobuf:"varint,11,opt,name=gc_marked_at,json=gcMarkedAt" json:"gc_marked_at,omitempty"`
}
func (m *Pod) Reset() { *m = Pod{} }
func (m *Pod) String() string { return proto.CompactTextString(m) }
func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Pod) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Pod) GetPid() int32 {
if m != nil {
return m.Pid
}
return 0
}
func (m *Pod) GetState() PodState {
if m != nil {
return m.State
}
return PodState_POD_STATE_UNDEFINED
}
func (m *Pod) GetApps() []*App {
if m != nil {
return m.Apps
}
return nil
}
func (m *Pod) GetNetworks() []*Network {
if m != nil {
return m.Networks
}
return nil
}
func (m *Pod) GetManifest() []byte {
if m != nil {
return m.Manifest
}
return nil
}
func (m *Pod) GetAnnotations() []*KeyValue {
if m != nil {
return m.Annotations
}
return nil
}
func (m *Pod) GetCgroup() string {
if m != nil {
return m.Cgroup
}
return ""
}
func (m *Pod) GetCreatedAt() int64 {
if m != nil {
return m.CreatedAt
}
return 0
}
func (m *Pod) GetStartedAt() int64 {
if m != nil {
return m.StartedAt
}
return 0
}
func (m *Pod) GetGcMarkedAt() int64 {
if m != nil {
return m.GcMarkedAt
}
return 0
}
type KeyValue struct {
// Key part of the key-value pair.
Key string `protobuf:"bytes,1,opt,name=Key" json:"Key,omitempty"`
// Value part of the key-value pair.
Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
func (m *KeyValue) Reset() { *m = KeyValue{} }
func (m *KeyValue) String() string { return proto.CompactTextString(m) }
func (*KeyValue) ProtoMessage() {}
func (*KeyValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *KeyValue) GetKey() string {
if m != nil {
return m.Key
}
return ""
}
func (m *KeyValue) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
// PodFilter defines the condition that the returned pods need to satisfy in ListPods().
// The conditions are combined by 'AND', and different filters are combined by 'OR'.
type PodFilter struct {
// If not empty, the pods that have any of the ids will be returned.
Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
// If not empty, the pods that have any of the states will be returned.
States []PodState `protobuf:"varint,2,rep,packed,name=states,enum=v1alpha.PodState" json:"states,omitempty"`
// If not empty, the pods that have all of the apps will be returned.
AppNames []string `protobuf:"bytes,3,rep,name=app_names,json=appNames" json:"app_names,omitempty"`
// If not empty, the pods that have all of the images(in the apps) will be returned
ImageIds []string `protobuf:"bytes,4,rep,name=image_ids,json=imageIds" json:"image_ids,omitempty"`
// If not empty, the pods that are in all of the networks will be returned.
NetworkNames []string `protobuf:"bytes,5,rep,name=network_names,json=networkNames" json:"network_names,omitempty"`
// If not empty, the pods that have all of the annotations will be returned.
Annotations []*KeyValue `protobuf:"bytes,6,rep,name=annotations" json:"annotations,omitempty"`
// If not empty, the pods whose cgroup are listed will be returned.
Cgroups []string `protobuf:"bytes,7,rep,name=cgroups" json:"cgroups,omitempty"`
// If not empty, the pods whose these cgroup belong to will be returned.
// i.e. the pod's cgroup is a prefix of the specified cgroup
PodSubCgroups []string `protobuf:"bytes,8,rep,name=pod_sub_cgroups,json=podSubCgroups" json:"pod_sub_cgroups,omitempty"`
}
func (m *PodFilter) Reset() { *m = PodFilter{} }
func (m *PodFilter) String() string { return proto.CompactTextString(m) }
func (*PodFilter) ProtoMessage() {}
func (*PodFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *PodFilter) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
func (m *PodFilter) GetStates() []PodState {
if m != nil {
return m.States
}
return nil
}
func (m *PodFilter) GetAppNames() []string {
if m != nil {
return m.AppNames
}
return nil
}
func (m *PodFilter) GetImageIds() []string {
if m != nil {
return m.ImageIds
}
return nil
}
func (m *PodFilter) GetNetworkNames() []string {
if m != nil {
return m.NetworkNames
}
return nil
}
func (m *PodFilter) GetAnnotations() []*KeyValue {
if m != nil {
return m.Annotations
}
return nil
}
func (m *PodFilter) GetCgroups() []string {
if m != nil {
return m.Cgroups
}
return nil
}
func (m *PodFilter) GetPodSubCgroups() []string {
if m != nil {
return m.PodSubCgroups
}
return nil
}
// ImageFilter defines the condition that the returned images need to satisfy in ListImages().
// The conditions are combined by 'AND', and different filters are combined by 'OR'.
type ImageFilter struct {
// If not empty, the images that have any of the ids will be returned.
Ids []string `protobuf:"bytes,1,rep,name=ids" json:"ids,omitempty"`
// If not empty, the images that have any of the prefixes in the name will be returned.
Prefixes []string `protobuf:"bytes,2,rep,name=prefixes" json:"prefixes,omitempty"`
// If not empty, the images that have any of the base names will be returned.
// For example, both 'coreos.com/etcd' and 'k8s.io/etcd' will be returned if 'etcd' is included,
// however 'k8s.io/etcd-backup' will not be returned.
BaseNames []string `protobuf:"bytes,3,rep,name=base_names,json=baseNames" json:"base_names,omitempty"`
// If not empty, the images that have any of the keywords in the name will be returned.
// For example, both 'kubernetes-etcd' and 'etcd:latest' will be returned if 'etcd' is included.
Keywords []string `protobuf:"bytes,4,rep,name=keywords" json:"keywords,omitempty"`
// If not empty, the images that have all of the labels will be returned.
Labels []*KeyValue `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
// If set, the images that are imported after this timestamp will be returned.
ImportedAfter int64 `protobuf:"varint,6,opt,name=imported_after,json=importedAfter" json:"imported_after,omitempty"`
// If set, the images that are imported before this timestamp will be returned.
ImportedBefore int64 `protobuf:"varint,7,opt,name=imported_before,json=importedBefore" json:"imported_before,omitempty"`
// If not empty, the images that have all of the annotations will be returned.
Annotations []*KeyValue `protobuf:"bytes,8,rep,name=annotations" json:"annotations,omitempty"`
// If not empty, the images that have any of the exact full names will be returned.
FullNames []string `protobuf:"bytes,9,rep,name=full_names,json=fullNames" json:"full_names,omitempty"`
}
func (m *ImageFilter) Reset() { *m = ImageFilter{} }
func (m *ImageFilter) String() string { return proto.CompactTextString(m) }
func (*ImageFilter) ProtoMessage() {}
func (*ImageFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ImageFilter) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
func (m *ImageFilter) GetPrefixes() []string {
if m != nil {
return m.Prefixes
}
return nil
}
func (m *ImageFilter) GetBaseNames() []string {
if m != nil {
return m.BaseNames
}
return nil
}
func (m *ImageFilter) GetKeywords() []string {
if m != nil {
return m.Keywords
}
return nil
}
func (m *ImageFilter) GetLabels() []*KeyValue {
if m != nil {
return m.Labels
}
return nil
}
func (m *ImageFilter) GetImportedAfter() int64 {
if m != nil {
return m.ImportedAfter
}
return 0
}
func (m *ImageFilter) GetImportedBefore() int64 {
if m != nil {
return m.ImportedBefore
}
return 0
}
func (m *ImageFilter) GetAnnotations() []*KeyValue {
if m != nil {
return m.Annotations
}
return nil
}
func (m *ImageFilter) GetFullNames() []string {
if m != nil {
return m.FullNames
}
return nil
}
// GlobalFlags describes the flags that passed to rkt api service when it is launched.
type GlobalFlags struct {
// Data directory.
Dir string `protobuf:"bytes,1,opt,name=dir" json:"dir,omitempty"`
// System configuration directory.
SystemConfigDir string `protobuf:"bytes,2,opt,name=system_config_dir,json=systemConfigDir" json:"system_config_dir,omitempty"`
// Local configuration directory.
LocalConfigDir string `protobuf:"bytes,3,opt,name=local_config_dir,json=localConfigDir" json:"local_config_dir,omitempty"`
// User configuration directory.
UserConfigDir string `protobuf:"bytes,4,opt,name=user_config_dir,json=userConfigDir" json:"user_config_dir,omitempty"`
// Insecure flags configure which security features to disable.
InsecureFlags string `protobuf:"bytes,5,opt,name=insecure_flags,json=insecureFlags" json:"insecure_flags,omitempty"`
// Whether to automatically trust gpg keys fetched from https
TrustKeysFromHttps bool `protobuf:"varint,6,opt,name=trust_keys_from_https,json=trustKeysFromHttps" json:"trust_keys_from_https,omitempty"`
}
func (m *GlobalFlags) Reset() { *m = GlobalFlags{} }
func (m *GlobalFlags) String() string { return proto.CompactTextString(m) }
func (*GlobalFlags) ProtoMessage() {}
func (*GlobalFlags) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *GlobalFlags) GetDir() string {
if m != nil {
return m.Dir
}
return ""
}
func (m *GlobalFlags) GetSystemConfigDir() string {
if m != nil {
return m.SystemConfigDir
}
return ""
}
func (m *GlobalFlags) GetLocalConfigDir() string {
if m != nil {
return m.LocalConfigDir
}
return ""
}
func (m *GlobalFlags) GetUserConfigDir() string {
if m != nil {
return m.UserConfigDir
}
return ""
}
func (m *GlobalFlags) GetInsecureFlags() string {
if m != nil {
return m.InsecureFlags
}
return ""
}
func (m *GlobalFlags) GetTrustKeysFromHttps() bool {
if m != nil {
return m.TrustKeysFromHttps
}
return false
}
// Info describes the information of rkt on the machine.
type Info struct {
// Version of rkt, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
RktVersion string `protobuf:"bytes,1,opt,name=rkt_version,json=rktVersion" json:"rkt_version,omitempty"`
// Version of appc, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
AppcVersion string `protobuf:"bytes,2,opt,name=appc_version,json=appcVersion" json:"appc_version,omitempty"`
// Latest version of the api that's supported by the service, required, in the form of Semantic Versioning 2.0.0 (http://semver.org/).
ApiVersion string `protobuf:"bytes,3,opt,name=api_version,json=apiVersion" json:"api_version,omitempty"`
// The global flags that passed to the rkt api service when it's launched.
GlobalFlags *GlobalFlags `protobuf:"bytes,4,opt,name=global_flags,json=globalFlags" json:"global_flags,omitempty"`
}
func (m *Info) Reset() { *m = Info{} }
func (m *Info) String() string { return proto.CompactTextString(m) }
func (*Info) ProtoMessage() {}
func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *Info) GetRktVersion() string {
if m != nil {
return m.RktVersion
}
return ""
}
func (m *Info) GetAppcVersion() string {
if m != nil {
return m.AppcVersion
}
return ""
}
func (m *Info) GetApiVersion() string {
if m != nil {
return m.ApiVersion
}
return ""
}
func (m *Info) GetGlobalFlags() *GlobalFlags {
if m != nil {
return m.GlobalFlags
}
return nil
}
// Event describes the events that will be received via ListenEvents().
type Event struct {
// Type of the event, required.
Type EventType `protobuf:"varint,1,opt,name=type,enum=v1alpha.EventType" json:"type,omitempty"`
// ID of the subject that causes the event, required.
// If the event is a pod or app event, the id is the pod's uuid.
// If the event is an image event, the id is the image's id.
Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
// Name of the subject that causes the event, required.
// If the event is a pod event, the name is the pod's name.
// If the event is an app event, the name is the app's name.
// If the event is an image event, the name is the image's name.
From string `protobuf:"bytes,3,opt,name=from" json:"from,omitempty"`
// Timestamp of when the event happens, it is the seconds since epoch, required.
Time int64 `protobuf:"varint,4,opt,name=time" json:"time,omitempty"`
// Data of the event, in the form of key-value pairs, optional.
Data []*KeyValue `protobuf:"bytes,5,rep,name=data" json:"data,omitempty"`
}
func (m *Event) Reset() { *m = Event{} }
func (m *Event) String() string { return proto.CompactTextString(m) }
func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func (m *Event) GetType() EventType {
if m != nil {
return m.Type
}
return EventType_EVENT_TYPE_UNDEFINED
}
func (m *Event) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *Event) GetFrom() string {
if m != nil {
return m.From
}
return ""
}
func (m *Event) GetTime() int64 {
if m != nil {
return m.Time
}
return 0
}
func (m *Event) GetData() []*KeyValue {
if m != nil {
return m.Data
}
return nil
}
// EventFilter defines the conditions that the returned events need to satisfy in ListenEvents().
// The conditions are combined by 'AND'.
type EventFilter struct {
// If not empty, then only returns the events that have the listed types.
Types []EventType `protobuf:"varint,1,rep,packed,name=types,enum=v1alpha.EventType" json:"types,omitempty"`
// If not empty, then only returns the events whose 'id' is included in the listed ids.
Ids []string `protobuf:"bytes,2,rep,name=ids" json:"ids,omitempty"`
// If not empty, then only returns the events whose 'from' is included in the listed names.
Names []string `protobuf:"bytes,3,rep,name=names" json:"names,omitempty"`
// If set, then only returns the events after this timestamp.
// If the server starts after since_time, then only the events that happened after the start of the server will be returned.
// If since_time is a future timestamp, then no events will be returned until that time.
SinceTime int64 `protobuf:"varint,4,opt,name=since_time,json=sinceTime" json:"since_time,omitempty"`
// If set, then only returns the events before this timestamp.
// If it is a future timestamp, then the event stream will be closed at that moment.
UntilTime int64 `protobuf:"varint,5,opt,name=until_time,json=untilTime" json:"until_time,omitempty"`
}
func (m *EventFilter) Reset() { *m = EventFilter{} }
func (m *EventFilter) String() string { return proto.CompactTextString(m) }
func (*EventFilter) ProtoMessage() {}
func (*EventFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
func (m *EventFilter) GetTypes() []EventType {
if m != nil {
return m.Types
}
return nil
}
func (m *EventFilter) GetIds() []string {
if m != nil {
return m.Ids
}
return nil
}
func (m *EventFilter) GetNames() []string {
if m != nil {
return m.Names
}
return nil
}
func (m *EventFilter) GetSinceTime() int64 {
if m != nil {
return m.SinceTime
}
return 0
}
func (m *EventFilter) GetUntilTime() int64 {
if m != nil {
return m.UntilTime
}
return 0
}
// Request for GetInfo().
type GetInfoRequest struct {
}
func (m *GetInfoRequest) Reset() { *m = GetInfoRequest{} }
func (m *GetInfoRequest) String() string { return proto.CompactTextString(m) }
func (*GetInfoRequest) ProtoMessage() {}
func (*GetInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
// Response for GetInfo().
type GetInfoResponse struct {
Info *Info `protobuf:"bytes,1,opt,name=info" json:"info,omitempty"`
}
func (m *GetInfoResponse) Reset() { *m = GetInfoResponse{} }
func (m *GetInfoResponse) String() string { return proto.CompactTextString(m) }
func (*GetInfoResponse) ProtoMessage() {}
func (*GetInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
func (m *GetInfoResponse) GetInfo() *Info {
if m != nil {
return m.Info
}
return nil
}
// Request for ListPods().
type ListPodsRequest struct {
Filters []*PodFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"`
}
func (m *ListPodsRequest) Reset() { *m = ListPodsRequest{} }
func (m *ListPodsRequest) String() string { return proto.CompactTextString(m) }
func (*ListPodsRequest) ProtoMessage() {}
func (*ListPodsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
func (m *ListPodsRequest) GetFilters() []*PodFilter {
if m != nil {
return m.Filters
}
return nil
}
func (m *ListPodsRequest) GetDetail() bool {
if m != nil {
return m.Detail
}
return false
}
// Response for ListPods().
type ListPodsResponse struct {
Pods []*Pod `protobuf:"bytes,1,rep,name=pods" json:"pods,omitempty"`
}
func (m *ListPodsResponse) Reset() { *m = ListPodsResponse{} }
func (m *ListPodsResponse) String() string { return proto.CompactTextString(m) }
func (*ListPodsResponse) ProtoMessage() {}
func (*ListPodsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
func (m *ListPodsResponse) GetPods() []*Pod {
if m != nil {
return m.Pods
}
return nil
}
// Request for InspectPod().
type InspectPodRequest struct {
// ID of the pod which we are querying status for, required.
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
}
func (m *InspectPodRequest) Reset() { *m = InspectPodRequest{} }
func (m *InspectPodRequest) String() string { return proto.CompactTextString(m) }
func (*InspectPodRequest) ProtoMessage() {}
func (*InspectPodRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
func (m *InspectPodRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
// Response for InspectPod().
type InspectPodResponse struct {
Pod *Pod `protobuf:"bytes,1,opt,name=pod" json:"pod,omitempty"`
}
func (m *InspectPodResponse) Reset() { *m = InspectPodResponse{} }
func (m *InspectPodResponse) String() string { return proto.CompactTextString(m) }
func (*InspectPodResponse) ProtoMessage() {}
func (*InspectPodResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
func (m *InspectPodResponse) GetPod() *Pod {
if m != nil {
return m.Pod
}
return nil
}
// Request for ListImages().
type ListImagesRequest struct {
Filters []*ImageFilter `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
Detail bool `protobuf:"varint,2,opt,name=detail" json:"detail,omitempty"`
}
func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} }
func (m *ListImagesRequest) String() string { return proto.CompactTextString(m) }
func (*ListImagesRequest) ProtoMessage() {}
func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *ListImagesRequest) GetFilters() []*ImageFilter {
if m != nil {
return m.Filters
}
return nil
}
func (m *ListImagesRequest) GetDetail() bool {
if m != nil {
return m.Detail
}
return false
}
// Response for ListImages().
type ListImagesResponse struct {
Images []*Image `protobuf:"bytes,1,rep,name=images" json:"images,omitempty"`
}
func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} }
func (m *ListImagesResponse) String() string { return proto.CompactTextString(m) }
func (*ListImagesResponse) ProtoMessage() {}
func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *ListImagesResponse) GetImages() []*Image {
if m != nil {
return m.Images
}
return nil
}
// Request for InspectImage().
type InspectImageRequest struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
}
func (m *InspectImageRequest) Reset() { *m = InspectImageRequest{} }
func (m *InspectImageRequest) String() string { return proto.CompactTextString(m) }
func (*InspectImageRequest) ProtoMessage() {}
func (*InspectImageRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *InspectImageRequest) GetId() string {
if m != nil {
return m.Id
}
return ""
}
// Response for InspectImage().
type InspectImageResponse struct {
Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
}
func (m *InspectImageResponse) Reset() { *m = InspectImageResponse{} }
func (m *InspectImageResponse) String() string { return proto.CompactTextString(m) }
func (*InspectImageResponse) ProtoMessage() {}
func (*InspectImageResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
func (m *InspectImageResponse) GetImage() *Image {
if m != nil {
return m.Image
}
return nil
}
// Request for ListenEvents().
type ListenEventsRequest struct {
Filter *EventFilter `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"`
}
func (m *ListenEventsRequest) Reset() { *m = ListenEventsRequest{} }
func (m *ListenEventsRequest) String() string { return proto.CompactTextString(m) }
func (*ListenEventsRequest) ProtoMessage() {}
func (*ListenEventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
func (m *ListenEventsRequest) GetFilter() *EventFilter {
if m != nil {
return m.Filter
}
return nil
}
// Response for ListenEvents().
type ListenEventsResponse struct {
// Aggregate multiple events to reduce round trips, optional as the response can contain no events.
Events []*Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
}
func (m *ListenEventsResponse) Reset() { *m = ListenEventsResponse{} }
func (m *ListenEventsResponse) String() string { return proto.CompactTextString(m) }
func (*ListenEventsResponse) ProtoMessage() {}
func (*ListenEventsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
func (m *ListenEventsResponse) GetEvents() []*Event {
if m != nil {
return m.Events
}
return nil
}
// Request for GetLogs().
type GetLogsRequest struct {
// ID of the pod which we will get logs from, required.
PodId string `protobuf:"bytes,1,opt,name=pod_id,json=podId" json:"pod_id,omitempty"`
// Name of the app within the pod which we will get logs
// from, optional. If not set, then the logs of all the
// apps within the pod will be returned.
AppName string `protobuf:"bytes,2,opt,name=app_name,json=appName" json:"app_name,omitempty"`
// Number of most recent lines to return, optional.
Lines int32 `protobuf:"varint,3,opt,name=lines" json:"lines,omitempty"`
// If true, then a response stream will not be closed,
// and new log response will be sent via the stream, default is false.
Follow bool `protobuf:"varint,4,opt,name=follow" json:"follow,omitempty"`
// If set, then only the logs after the timestamp will
// be returned, optional.
SinceTime int64 `protobuf:"varint,5,opt,name=since_time,json=sinceTime" json:"since_time,omitempty"`
// If set, then only the logs before the timestamp will
// be returned, optional.
UntilTime int64 `protobuf:"varint,6,opt,name=until_time,json=untilTime" json:"until_time,omitempty"`
}
func (m *GetLogsRequest) Reset() { *m = GetLogsRequest{} }
func (m *GetLogsRequest) String() string { return proto.CompactTextString(m) }
func (*GetLogsRequest) ProtoMessage() {}
func (*GetLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
func (m *GetLogsRequest) GetPodId() string {
if m != nil {
return m.PodId
}
return ""
}
func (m *GetLogsRequest) GetAppName() string {
if m != nil {
return m.AppName
}
return ""
}
func (m *GetLogsRequest) GetLines() int32 {
if m != nil {
return m.Lines
}
return 0
}
func (m *GetLogsRequest) GetFollow() bool {
if m != nil {
return m.Follow
}
return false
}
func (m *GetLogsRequest) GetSinceTime() int64 {
if m != nil {
return m.SinceTime
}
return 0
}
func (m *GetLogsRequest) GetUntilTime() int64 {
if m != nil {
return m.UntilTime
}
return 0
}
// Response for GetLogs().
type GetLogsResponse struct {
// List of the log lines that returned, optional as the response can contain no logs.
Lines []string `protobuf:"bytes,1,rep,name=lines" json:"lines,omitempty"`
}
func (m *GetLogsResponse) Reset() { *m = GetLogsResponse{} }
func (m *GetLogsResponse) String() string { return proto.CompactTextString(m) }
func (*GetLogsResponse) ProtoMessage() {}
func (*GetLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
func (m *GetLogsResponse) GetLines() []string {
if m != nil {
return m.Lines
}
return nil
}
func init() {
proto.RegisterType((*ImageFormat)(nil), "v1alpha.ImageFormat")
proto.RegisterType((*Image)(nil), "v1alpha.Image")
proto.RegisterType((*Network)(nil), "v1alpha.Network")
proto.RegisterType((*App)(nil), "v1alpha.App")
proto.RegisterType((*Pod)(nil), "v1alpha.Pod")
proto.RegisterType((*KeyValue)(nil), "v1alpha.KeyValue")
proto.RegisterType((*PodFilter)(nil), "v1alpha.PodFilter")
proto.RegisterType((*ImageFilter)(nil), "v1alpha.ImageFilter")
proto.RegisterType((*GlobalFlags)(nil), "v1alpha.GlobalFlags")
proto.RegisterType((*Info)(nil), "v1alpha.Info")
proto.RegisterType((*Event)(nil), "v1alpha.Event")
proto.RegisterType((*EventFilter)(nil), "v1alpha.EventFilter")
proto.RegisterType((*GetInfoRequest)(nil), "v1alpha.GetInfoRequest")
proto.RegisterType((*GetInfoResponse)(nil), "v1alpha.GetInfoResponse")
proto.RegisterType((*ListPodsRequest)(nil), "v1alpha.ListPodsRequest")
proto.RegisterType((*ListPodsResponse)(nil), "v1alpha.ListPodsResponse")
proto.RegisterType((*InspectPodRequest)(nil), "v1alpha.InspectPodRequest")
proto.RegisterType((*InspectPodResponse)(nil), "v1alpha.InspectPodResponse")
proto.RegisterType((*ListImagesRequest)(nil), "v1alpha.ListImagesRequest")
proto.RegisterType((*ListImagesResponse)(nil), "v1alpha.ListImagesResponse")
proto.RegisterType((*InspectImageRequest)(nil), "v1alpha.InspectImageRequest")
proto.RegisterType((*InspectImageResponse)(nil), "v1alpha.InspectImageResponse")
proto.RegisterType((*ListenEventsRequest)(nil), "v1alpha.ListenEventsRequest")
proto.RegisterType((*ListenEventsResponse)(nil), "v1alpha.ListenEventsResponse")
proto.RegisterType((*GetLogsRequest)(nil), "v1alpha.GetLogsRequest")
proto.RegisterType((*GetLogsResponse)(nil), "v1alpha.GetLogsResponse")
proto.RegisterEnum("v1alpha.ImageType", ImageType_name, ImageType_value)
proto.RegisterEnum("v1alpha.AppState", AppState_name, AppState_value)
proto.RegisterEnum("v1alpha.PodState", PodState_name, PodState_value)
proto.RegisterEnum("v1alpha.EventType", EventType_name, EventType_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for PublicAPI service
type PublicAPIClient interface {
// GetInfo gets the rkt's information on the machine.
GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error)
// ListPods lists rkt pods on the machine.
ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*ListPodsResponse, error)
// InspectPod gets detailed pod information of the specified pod.
InspectPod(ctx context.Context, in *InspectPodRequest, opts ...grpc.CallOption) (*InspectPodResponse, error)
// ListImages lists the images on the machine.
ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error)
// InspectImage gets the detailed image information of the specified image.
InspectImage(ctx context.Context, in *InspectImageRequest, opts ...grpc.CallOption) (*InspectImageResponse, error)
// ListenEvents listens for events; it returns a response stream
// that contains event objects.
ListenEvents(ctx context.Context, in *ListenEventsRequest, opts ...grpc.CallOption) (PublicAPI_ListenEventsClient, error)
// GetLogs gets the logs for a pod; if the app is also specified, then only
// the logs of the app will be returned.
//
// If 'follow' in the 'GetLogsRequest' is set to 'true', then the response
// stream will not be closed after the first response; future logs will be
// sent via the stream.
GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (PublicAPI_GetLogsClient, error)
}
type publicAPIClient struct {
cc *grpc.ClientConn
}
func NewPublicAPIClient(cc *grpc.ClientConn) PublicAPIClient {
return &publicAPIClient{cc}
}
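// exampleListPods is an illustrative sketch, not generated code: it dials the
// rkt API service and issues a ListPods call. The listen address and dial
// options are assumptions for demonstration purposes.
func exampleListPods(addr string) error {
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return err
	}
	defer conn.Close()
	client := NewPublicAPIClient(conn)
	resp, err := client.ListPods(context.Background(), &ListPodsRequest{Detail: true})
	if err != nil {
		return err
	}
	for _, pod := range resp.Pods {
		fmt.Println(pod.Id, pod.State)
	}
	return nil
}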
func (c *publicAPIClient) GetInfo(ctx context.Context, in *GetInfoRequest, opts ...grpc.CallOption) (*GetInfoResponse, error) {
out := new(GetInfoResponse)
err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/GetInfo", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *publicAPIClient) ListPods(ctx context.Context, in *ListPodsRequest, opts ...grpc.CallOption) (*ListPodsResponse, error) {
out := new(ListPodsResponse)
err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/ListPods", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *publicAPIClient) InspectPod(ctx context.Context, in *InspectPodRequest, opts ...grpc.CallOption) (*InspectPodResponse, error) {
out := new(InspectPodResponse)
err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/InspectPod", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *publicAPIClient) ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) {
out := new(ListImagesResponse)
err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/ListImages", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *publicAPIClient) InspectImage(ctx context.Context, in *InspectImageRequest, opts ...grpc.CallOption) (*InspectImageResponse, error) {
out := new(InspectImageResponse)
err := grpc.Invoke(ctx, "/v1alpha.PublicAPI/InspectImage", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *publicAPIClient) ListenEvents(ctx context.Context, in *ListenEventsRequest, opts ...grpc.CallOption) (PublicAPI_ListenEventsClient, error) {
stream, err := grpc.NewClientStream(ctx, &_PublicAPI_serviceDesc.Streams[0], c.cc, "/v1alpha.PublicAPI/ListenEvents", opts...)
if err != nil {
return nil, err
}
x := &publicAPIListenEventsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type PublicAPI_ListenEventsClient interface {
Recv() (*ListenEventsResponse, error)
grpc.ClientStream
}
type publicAPIListenEventsClient struct {
grpc.ClientStream
}
func (x *publicAPIListenEventsClient) Recv() (*ListenEventsResponse, error) {
m := new(ListenEventsResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
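// exampleWatchEvents is an illustrative sketch, not generated code: it drains
// the server stream returned by ListenEvents until the stream ends. The empty
// ListenEventsRequest (no filter) is an assumption for demonstration.
func exampleWatchEvents(ctx context.Context, client PublicAPIClient) error {
	stream, err := client.ListenEvents(ctx, &ListenEventsRequest{})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // io.EOF once the server closes the stream
		}
		for _, ev := range resp.Events {
			fmt.Printf("event %v from %q at %d\n", ev.Type, ev.From, ev.Time)
		}
	}
}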
func (c *publicAPIClient) GetLogs(ctx context.Context, in *GetLogsRequest, opts ...grpc.CallOption) (PublicAPI_GetLogsClient, error) {
stream, err := grpc.NewClientStream(ctx, &_PublicAPI_serviceDesc.Streams[1], c.cc, "/v1alpha.PublicAPI/GetLogs", opts...)
if err != nil {
return nil, err
}
x := &publicAPIGetLogsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type PublicAPI_GetLogsClient interface {
Recv() (*GetLogsResponse, error)
grpc.ClientStream
}
type publicAPIGetLogsClient struct {
grpc.ClientStream
}
func (x *publicAPIGetLogsClient) Recv() (*GetLogsResponse, error) {
m := new(GetLogsResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
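// exampleFollowLogs is an illustrative sketch, not generated code: it follows
// the logs of a single pod via the GetLogs server stream. The field values in
// the request are assumptions for demonstration.
func exampleFollowLogs(ctx context.Context, client PublicAPIClient, podID string) error {
	stream, err := client.GetLogs(ctx, &GetLogsRequest{PodId: podID, Follow: true, Lines: 20})
	if err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err
		}
		for _, line := range resp.Lines {
			fmt.Println(line)
		}
	}
}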
// Server API for PublicAPI service
type PublicAPIServer interface {
// GetInfo gets the rkt's information on the machine.
GetInfo(context.Context, *GetInfoRequest) (*GetInfoResponse, error)
// ListPods lists rkt pods on the machine.
ListPods(context.Context, *ListPodsRequest) (*ListPodsResponse, error)
// InspectPod gets detailed pod information of the specified pod.
InspectPod(context.Context, *InspectPodRequest) (*InspectPodResponse, error)
// ListImages lists the images on the machine.
ListImages(context.Context, *ListImagesRequest) (*ListImagesResponse, error)
// InspectImage gets the detailed image information of the specified image.
InspectImage(context.Context, *InspectImageRequest) (*InspectImageResponse, error)
// ListenEvents listens for events; it returns a response stream
// that contains event objects.
ListenEvents(*ListenEventsRequest, PublicAPI_ListenEventsServer) error
// GetLogs gets the logs for a pod; if the app is also specified, then only
// the logs of the app will be returned.
//
// If 'follow' in the 'GetLogsRequest' is set to 'true', then the response
// stream will not be closed after the first response; future logs will be
// sent via the stream.
GetLogs(*GetLogsRequest, PublicAPI_GetLogsServer) error
}
func RegisterPublicAPIServer(s *grpc.Server, srv PublicAPIServer) {
s.RegisterService(&_PublicAPI_serviceDesc, srv)
}
func _PublicAPI_GetInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetInfoRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PublicAPIServer).GetInfo(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1alpha.PublicAPI/GetInfo",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PublicAPIServer).GetInfo(ctx, req.(*GetInfoRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PublicAPI_ListPods_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListPodsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PublicAPIServer).ListPods(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1alpha.PublicAPI/ListPods",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PublicAPIServer).ListPods(ctx, req.(*ListPodsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PublicAPI_InspectPod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InspectPodRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PublicAPIServer).InspectPod(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1alpha.PublicAPI/InspectPod",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PublicAPIServer).InspectPod(ctx, req.(*InspectPodRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PublicAPI_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ListImagesRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PublicAPIServer).ListImages(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1alpha.PublicAPI/ListImages",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PublicAPIServer).ListImages(ctx, req.(*ListImagesRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PublicAPI_InspectImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(InspectImageRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PublicAPIServer).InspectImage(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/v1alpha.PublicAPI/InspectImage",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PublicAPIServer).InspectImage(ctx, req.(*InspectImageRequest))
}
return interceptor(ctx, in, info, handler)
}
func _PublicAPI_ListenEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(ListenEventsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(PublicAPIServer).ListenEvents(m, &publicAPIListenEventsServer{stream})
}
type PublicAPI_ListenEventsServer interface {
Send(*ListenEventsResponse) error
grpc.ServerStream
}
type publicAPIListenEventsServer struct {
grpc.ServerStream
}
func (x *publicAPIListenEventsServer) Send(m *ListenEventsResponse) error {
return x.ServerStream.SendMsg(m)
}
func _PublicAPI_GetLogs_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(GetLogsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(PublicAPIServer).GetLogs(m, &publicAPIGetLogsServer{stream})
}
type PublicAPI_GetLogsServer interface {
Send(*GetLogsResponse) error
grpc.ServerStream
}
type publicAPIGetLogsServer struct {
grpc.ServerStream
}
func (x *publicAPIGetLogsServer) Send(m *GetLogsResponse) error {
return x.ServerStream.SendMsg(m)
}
var _PublicAPI_serviceDesc = grpc.ServiceDesc{
ServiceName: "v1alpha.PublicAPI",
HandlerType: (*PublicAPIServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "GetInfo",
Handler: _PublicAPI_GetInfo_Handler,
},
{
MethodName: "ListPods",
Handler: _PublicAPI_ListPods_Handler,
},
{
MethodName: "InspectPod",
Handler: _PublicAPI_InspectPod_Handler,
},
{
MethodName: "ListImages",
Handler: _PublicAPI_ListImages_Handler,
},
{
MethodName: "InspectImage",
Handler: _PublicAPI_InspectImage_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ListenEvents",
Handler: _PublicAPI_ListenEvents_Handler,
ServerStreams: true,
},
{
StreamName: "GetLogs",
Handler: _PublicAPI_GetLogs_Handler,
ServerStreams: true,
},
},
Metadata: "api.proto",
}
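// Illustrative wiring sketch (assumed listen address): serving the PublicAPI
// over TCP with a standard gRPC server.
//
//	lis, err := net.Listen("tcp", "localhost:15441")
//	if err != nil {
//		log.Fatal(err)
//	}
//	s := grpc.NewServer()
//	RegisterPublicAPIServer(s, &myServer{}) // myServer implements PublicAPIServer
//	log.Fatal(s.Serve(lis))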
func init() { proto.RegisterFile("api.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 1800 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x58, 0x4f, 0x73, 0xdb, 0xc6,
0x15, 0x37, 0xf8, 0x17, 0x78, 0xd4, 0x1f, 0x68, 0x2d, 0xd9, 0x30, 0x1d, 0x27, 0x0c, 0x12, 0xdb,
0x8a, 0x26, 0xe3, 0x69, 0x14, 0xb7, 0xbd, 0x64, 0x32, 0xa5, 0x49, 0x48, 0xe5, 0x58, 0x12, 0x39,
0x6b, 0xc6, 0x6d, 0xa6, 0x07, 0x0c, 0x44, 0x2c, 0x19, 0x8c, 0x40, 0x00, 0x05, 0x96, 0x72, 0xd4,
0x63, 0x3f, 0x40, 0xbf, 0x41, 0x7b, 0xea, 0xb9, 0xd7, 0xcc, 0xf4, 0xde, 0x8f, 0xd2, 0x43, 0x3f,
0x41, 0xaf, 0x9d, 0xfd, 0x03, 0x60, 0x09, 0x51, 0xae, 0x27, 0xb7, 0xdd, 0xf7, 0xfb, 0xe1, 0xed,
0xfb, 0xb7, 0xef, 0x2d, 0x09, 0x86, 0x97, 0x04, 0x2f, 0x92, 0x34, 0xa6, 0x31, 0x6a, 0x5f, 0x7f,
0xe5, 0x85, 0xc9, 0x0f, 0x9e, 0x3d, 0x86, 0xce, 0x68, 0xe9, 0x2d, 0xc8, 0x49, 0x9c, 0x2e, 0x3d,
0x8a, 0x9e, 0x41, 0x83, 0xde, 0x24, 0xc4, 0xd2, 0x7a, 0xda, 0xe1, 0xce, 0x31, 0x7a, 0x21, 0x69,
0x2f, 0x38, 0x67, 0x7a, 0x93, 0x10, 0xcc, 0x71, 0x64, 0x41, 0xfb, 0x9a, 0xa4, 0x59, 0x10, 0x47,
0x56, 0xad, 0xa7, 0x1d, 0x1a, 0x38, 0xdf, 0xda, 0x3f, 0xd5, 0xa0, 0xc9, 0xd9, 0xe8, 0x97, 0xd0,
0xb9, 0xf4, 0x32, 0xe2, 0xce, 0xb9, 0x6a, 0xae, 0xb2, 0x73, 0xbc, 0xbf, 0xae, 0x52, 0x1c, 0x8b,
0x81, 0x11, 0xa5, 0x09, 0x3b, 0x50, 0x0b, 0x7c, 0xa9, 0xb5, 0x16, 0xf8, 0x08, 0x41, 0x23, 0xf2,
0x96, 0xc4, 0xaa, 0x73, 0x09, 0x5f, 0xab, 0xc7, 0x37, 0xd6, 0x8e, 0x47, 0x5f, 0x80, 0x19, 0x2c,
0x93, 0x38, 0xa5, 0x2e, 0x0d, 0x96, 0x24, 0xa3, 0xde, 0x32, 0xb1, 0x9a, 0x3d, 0xed, 0xb0, 0x8e,
0x77, 0x85, 0x7c, 0x9a, 0x8b, 0x51, 0x17, 0xf4, 0xa5, 0x17, 0x05, 0x73, 0x92, 0x51, 0xab, 0xd5,
0xd3, 0x0e, 0xb7, 0x70, 0xb1, 0x67, 0x87, 0x66, 0xc1, 0x9f, 0x88, 0xd5, 0xe6, 0x9f, 0xf2, 0x35,
0xfa, 0x1a, 0x3a, 0x5e, 0x14, 0xc5, 0xd4, 0xa3, 0x41, 0x1c, 0x65, 0x96, 0xde, 0xab, 0x1f, 0x76,
0x8e, 0xf7, 0x0a, 0x7f, 0x5e, 0x93, 0x9b, 0xb7, 0x5e, 0xb8, 0x22, 0x58, 0x65, 0xa1, 0x2f, 0xa0,
0x15, 0x7a, 0x97, 0x24, 0xcc, 0x2c, 0xe3, 0x2e, 0xbe, 0x24, 0xd8, 0x0e, 0xb4, 0x2f, 0x08, 0x7d,
0x17, 0xa7, 0x57, 0x85, 0xcf, 0x9a, 0xe2, 0x33, 0x82, 0x46, 0x90, 0x5c, 0xbf, 0x94, 0x91, 0xe1,
0x6b, 0x29, 0xfb, 0x55, 0x1e, 0x1b, 0xb6, 0xb6, 0xff, 0xa9, 0x41, 0xbd, 0x9f, 0x24, 0x1b, 0x75,
0x7c, 0x0e, 0xcd, 0x80, 0x85, 0x9d, 0x2b, 0xe9, 0x1c, 0xef, 0xac, 0x27, 0x03, 0x0b, 0x10, 0x3d,
0x87, 0x66, 0x46, 0x3d, 0x2a, 0x42, 0xbe, 0xa3, 0x98, 0xdc, 0x4f, 0x92, 0x37, 0x0c, 0xc0, 0x02,
0x47, 0x8f, 0xc1, 0x20, 0x3f, 0x06, 0xd4, 0x9d, 0xc5, 0x3e, 0xe1, 0x89, 0xd8, 0xc3, 0x3a, 0x13,
0x0c, 0x62, 0xff, 0x56, 0xb8, 0x9a, 0x1f, 0x12, 0x2e, 0xfb, 0x3f, 0x35, 0xa8, 0x4f, 0x62, 0x5f,
0x16, 0x81, 0x56, 0x14, 0x81, 0x09, 0xf5, 0x44, 0x56, 0xc5, 0x1e, 0x66, 0xcb, 0xbb, 0x8d, 0x9c,
0xc4, 0xfe, 0x9a, 0x91, 0x3d, 0x68, 0x78, 0x49, 0x92, 0x59, 0x0d, 0x6e, 0xc0, 0x96, 0xea, 0x0c,
0xe6, 0x08, 0xfa, 0x12, 0xf4, 0x48, 0x04, 0x3e, 0x37, 0xd3, 0x2c, 0x58, 0x32, 0x23, 0xb8, 0x60,
0xbc, 0xb7, 0x6c, 0x2a, 0x3e, 0xb7, 0x3f, 0xa8, 0x44, 0x1e, 0x40, 0x6b, 0xb6, 0x48, 0xe3, 0x55,
0x62, 0xe9, 0xdc, 0x5f, 0xb9, 0x43, 0x4f, 0x00, 0x66, 0x29, 0xf1, 0x28, 0xf1, 0x5d, 0x8f, 0x5a,
0x06, 0xaf, 0x44, 0x43, 0x4a, 0xfa, 0x94, 0xc1, 0x19, 0xf5, 0x52, 0x09, 0x83, 0x80, 0xa5, 0xa4,
0x4f, 0x51, 0x0f, 0xb6, 0x16, 0x33, 0x77, 0xe9, 0xa5, 0x57, 0x82, 0xd0, 0xe1, 0x04, 0x58, 0xcc,
0xce, 0xb9, 0xa8, 0x4f, 0xed, 0x63, 0xd0, 0x73, 0x83, 0x58, 0x7c, 0x5f, 0x93, 0x1b, 0x19, 0x70,
0xb6, 0x44, 0xfb, 0xd0, 0xbc, 0x66, 0x90, 0xac, 0x37, 0xb1, 0xb1, 0xff, 0x56, 0x03, 0x63, 0x12,
0xfb, 0x27, 0x41, 0x48, 0x49, 0xca, 0xbe, 0x0a, 0xfc, 0xcc, 0xd2, 0x7a, 0x75, 0xf6, 0x55, 0xe0,
0xf3, 0x72, 0xe7, 0x51, 0xcf, 0xac, 0x5a, 0xaf, 0xbe, 0x39, 0x2d, 0x92, 0xc0, 0x8a, 0xc7, 0x4b,
0x12, 0x97, 0xd5, 0x65, 0x66, 0xd5, 0xb9, 0x0a, 0xdd, 0x4b, 0x92, 0x0b, 0xb6, 0x67, 0x20, 0xaf,
0x45, 0x97, 0xe9, 0x6f, 0x08, 0x90, 0x0b, 0x46, 0x7e, 0x86, 0x3e, 0x83, 0x6d, 0x99, 0x0d, 0xf9,
0x75, 0x93, 0x13, 0xb6, 0xa4, 0x50, 0x68, 0xa8, 0xa4, 0xa2, 0xf5, 0x41, 0xa9, 0xb0, 0xa0, 0x2d,
0x82, 0x2f, 0x72, 0x67, 0xe0, 0x7c, 0x8b, 0x9e, 0xc1, 0x6e, 0x12, 0xfb, 0x6e, 0xb6, 0xba, 0x74,
0x73, 0x86, 0xce, 0x19, 0xdb, 0x49, 0xec, 0xbf, 0x59, 0x5d, 0x0e, 0x84, 0xd0, 0xfe, 0x57, 0x2d,
0x6f, 0xa8, 0x77, 0x85, 0xa8, 0x0b, 0x7a, 0x92, 0x92, 0x79, 0xf0, 0xa3, 0x0c, 0x92, 0x81, 0x8b,
0x3d, 0xcb, 0x29, 0x6f, 0x99, 0x6a, 0x50, 0x0c, 0x26, 0x11, 0x3e, 0x75, 0x41, 0xbf, 0x22, 0x37,
0xef, 0xe2, 0xb4, 0x0c, 0x4a, 0xbe, 0x57, 0x1a, 0x4d, 0xf3, 0xff, 0x34, 0x1a, 0xf4, 0x14, 0x76,
0x44, 0x2f, 0x64, 0x95, 0x31, 0xa7, 0x24, 0xe5, 0x75, 0x5c, 0xc7, 0xdb, 0xb9, 0xb4, 0xcf, 0x84,
0xe8, 0x39, 0xec, 0x16, 0xb4, 0x4b, 0x32, 0x8f, 0xd3, 0xbc, 0x1d, 0x16, 0x5f, 0xbf, 0xe2, 0xd2,
0x9f, 0xd7, 0x18, 0x9f, 0x00, 0xcc, 0x57, 0x61, 0x28, 0x5d, 0x35, 0x84, 0xab, 0x4c, 0xc2, 0x5d,
0xb5, 0xff, 0xab, 0x41, 0xe7, 0x34, 0x8c, 0x2f, 0xbd, 0xf0, 0x24, 0xf4, 0x16, 0x19, 0x8b, 0xa3,
0x1f, 0xa4, 0x79, 0x81, 0xfa, 0x41, 0x8a, 0x8e, 0x60, 0x2f, 0xbb, 0xc9, 0x28, 0x59, 0xba, 0xb3,
0x38, 0x9a, 0x07, 0x0b, 0x97, 0xe1, 0xa2, 0x58, 0x77, 0x05, 0x30, 0xe0, 0xf2, 0x61, 0x90, 0xa2,
0x43, 0x30, 0xc3, 0x78, 0xe6, 0x85, 0x2a, 0x55, 0xf4, 0xcc, 0x1d, 0x2e, 0x2f, 0x99, 0xcf, 0x60,
0x77, 0x95, 0x91, 0x54, 0x25, 0x8a, 0x09, 0xb3, 0xcd, 0xc4, 0x25, 0x8f, 0xc5, 0x30, 0xca, 0xc8,
0x6c, 0x95, 0x12, 0x77, 0xce, 0x2c, 0xe4, 0x53, 0xc6, 0xc0, 0xdb, 0xb9, 0x54, 0x98, 0xfd, 0x15,
0x1c, 0xd0, 0x74, 0x95, 0x51, 0xf7, 0x8a, 0xdc, 0x64, 0xee, 0x3c, 0x8d, 0x97, 0xee, 0x0f, 0x94,
0x26, 0x19, 0x8f, 0xb8, 0x8e, 0x11, 0x07, 0x5f, 0x93, 0x9b, 0xec, 0x24, 0x8d, 0x97, 0xbf, 0x65,
0x88, 0xfd, 0x77, 0x0d, 0x1a, 0xa3, 0x68, 0x1e, 0xa3, 0x4f, 0xa0, 0x93, 0x5e, 0x51, 0x37, 0x1f,
0x74, 0xc2, 0x75, 0x48, 0xaf, 0xe8, 0x5b, 0x39, 0xeb, 0x3e, 0x85, 0x2d, 0x2f, 0x49, 0x66, 0xee,
0xfa, 0x24, 0xee, 0x30, 0x59, 0x4e, 0xf9, 0x04, 0x3a, 0x5e, 0x12, 0x14, 0x0c, 0xe1, 0x33, 0x78,
0x49, 0x90, 0x13, 0x7e, 0x0d, 0x5b, 0x0b, 0x1e, 0x66, 0xe9, 0x45, 0xa3, 0x32, 0xa5, 0x95, 0x1c,
0xe0, 0xce, 0xa2, 0xdc, 0xd8, 0x7f, 0xd1, 0xa0, 0xe9, 0x5c, 0x93, 0xe8, 0xee, 0x37, 0x03, 0x47,
0x95, 0x37, 0xc3, 0x86, 0xc1, 0xce, 0x02, 0x92, 0x0f, 0x2f, 0xb6, 0x66, 0x32, 0x36, 0xb7, 0xb9,
0x19, 0x75, 0xcc, 0xd7, 0xe8, 0x29, 0x34, 0x7c, 0x8f, 0x7a, 0x77, 0xd7, 0x35, 0x87, 0xed, 0xbf,
0x6a, 0xd0, 0xe1, 0x47, 0xca, 0x9b, 0x77, 0x08, 0x4d, 0x76, 0xac, 0xb8, 0x7b, 0x9b, 0xed, 0x12,
0x84, 0xfc, 0x8e, 0xd6, 0xca, 0x3b, 0xba, 0x0f, 0x4d, 0xf5, 0x0a, 0x8a, 0x0d, 0xef, 0xb8, 0x41,
0x34, 0x23, 0xae, 0x62, 0xa2, 0xc1, 0x25, 0xec, 0x51, 0xc1, 0xe0, 0x55, 0x44, 0x83, 0x50, 0xc0,
0xe2, 0xd1, 0x61, 0x70, 0x09, 0x83, 0x6d, 0x13, 0x76, 0x4e, 0x09, 0x65, 0x99, 0xc5, 0xe4, 0x8f,
0x2b, 0x92, 0x51, 0xfb, 0x25, 0xec, 0x16, 0x92, 0x2c, 0x89, 0xa3, 0x8c, 0xa0, 0x4f, 0xa1, 0x11,
0x44, 0xf3, 0x58, 0x3e, 0x96, 0xb6, 0xcb, 0xf9, 0xcc, 0x48, 0x1c, 0xb2, 0x7f, 0x07, 0xbb, 0x67,
0x41, 0x46, 0x27, 0xb1, 0x9f, 0x49, 0x45, 0xe8, 0x4b, 0x68, 0xcf, 0xb9, 0xd3, 0xc2, 0xd9, 0x8e,
0xe2, 0x6c, 0xd1, 0xac, 0x71, 0x4e, 0x61, 0xf3, 0xc6, 0x27, 0xd4, 0x0b, 0x42, 0x9e, 0x0b, 0x1d,
0xcb, 0x9d, 0xfd, 0x12, 0xcc, 0x52, 0xb1, 0xb4, 0xa7, 0x07, 0x8d, 0x24, 0xf6, 0x73, 0xb5, 0x5b,
0xaa, 0x5a, 0xcc, 0x11, 0xfb, 0x33, 0xd8, 0x1b, 0x45, 0x59, 0x42, 0x66, 0xec, 0xc3, 0xdc, 0xa0,
0xca, 0xf8, 0xb6, 0x5f, 0x02, 0x52, 0x49, 0x52, 0xf9, 0xc7, 0x50, 0x4f, 0x62, 0x5f, 0xfa, 0xba,
0xae, 0x9b, 0x01, 0xf6, 0x1f, 0x60, 0x8f, 0x19, 0xc4, 0xdb, 0x69, 0xe1, 0xeb, 0x8b, 0xaa, 0xaf,
0xd5, 0x17, 0xe5, 0x07, 0x7a, 0xfb, 0x0d, 0x20, 0x55, 0xb9, 0x34, 0xe9, 0x19, 0xb4, 0xf8, 0x98,
0xc9, 0x95, 0x57, 0x5f, 0x48, 0x12, 0xb5, 0x9f, 0xc2, 0x7d, 0xe9, 0x90, 0x90, 0xdf, 0xe1, 0xf7,
0x37, 0xb0, 0xbf, 0x4e, 0x93, 0xc7, 0x14, 0xef, 0x30, 0xed, 0x3d, 0xef, 0x30, 0x7b, 0x00, 0xf7,
0x99, 0x89, 0x24, 0xe2, 0x15, 0xab, 0x64, 0xbb, 0x25, 0x9c, 0xbb, 0xf5, 0xa4, 0x56, 0xca, 0x1f,
0x4b, 0x8e, 0xfd, 0x2d, 0xec, 0xaf, 0x2b, 0x29, 0x3d, 0x25, 0x5c, 0x72, 0xcb, 0x53, 0x4e, 0xc4,
0x12, 0xb5, 0xff, 0xa1, 0xf1, 0xba, 0x3d, 0x8b, 0x17, 0x85, 0x01, 0x07, 0xd0, 0x62, 0xb3, 0xb0,
0xf0, 0xb4, 0x99, 0xc4, 0xfe, 0xc8, 0x47, 0x8f, 0x40, 0xcf, 0x07, 0x7a, 0xfe, 0xa3, 0x40, 0xce,
0x73, 0x76, 0x9f, 0xc2, 0x20, 0xe2, 0xf7, 0x49, 0x3b, 0x6c, 0x62, 0xb1, 0x61, 0xa9, 0x99, 0xc7,
0x61, 0x18, 0xbf, 0xe3, 0x77, 0x49, 0xc7, 0x72, 0x57, 0xb9, 0x67, 0xcd, 0xf7, 0xdf, 0xb3, 0x56,
0xf5, 0x9e, 0x3d, 0xe7, 0xb7, 0x4a, 0xd8, 0x2b, 0x7d, 0x2d, 0x8e, 0x17, 0x63, 0x58, 0x6c, 0x8e,
0x08, 0x18, 0xc5, 0xcf, 0x1a, 0x64, 0xc1, 0xfe, 0xe8, 0xbc, 0x7f, 0xea, 0xb8, 0xd3, 0xef, 0x27,
0x8e, 0xfb, 0xdd, 0xc5, 0xd0, 0x39, 0x19, 0x5d, 0x38, 0x43, 0xf3, 0x1e, 0xba, 0x0f, 0xbb, 0x0a,
0xd2, 0x9f, 0x4c, 0x06, 0xa6, 0x86, 0x0e, 0x60, 0x4f, 0x11, 0x0e, 0xc7, 0x83, 0xd7, 0x0e, 0x36,
0x6b, 0x08, 0xc1, 0x8e, 0x22, 0x1e, 0x0f, 0x46, 0x66, 0xfd, 0x68, 0x02, 0x7a, 0xfe, 0x6e, 0x46,
0x0f, 0xe1, 0x7e, 0x7f, 0x32, 0x71, 0xdf, 0x4c, 0xfb, 0xd3, 0xf5, 0x43, 0x0e, 0x60, 0xaf, 0x04,
0xf0, 0x77, 0x17, 0x17, 0xa3, 0x8b, 0x53, 0x53, 0x43, 0xfb, 0x60, 0x96, 0x62, 0xe7, 0xf7, 0xa3,
0xa9, 0x33, 0x34, 0x6b, 0x47, 0xff, 0xd6, 0x40, 0xcf, 0x9f, 0x53, 0x4c, 0xe5, 0x64, 0x3c, 0xdc,
0xa0, 0x72, 0x1f, 0xcc, 0x12, 0x70, 0xce, 0x5f, 0xe1, 0xef, 0xc7, 0xa6, 0xb6, 0x4e, 0x9f, 0x60,
0x67, 0xd2, 0xc7, 0xec, 0xa8, 0x1a, 0x7a, 0x00, 0xa8, 0x0a, 0x38, 0x43, 0xb3, 0xce, 0x2c, 0x2b,
0xe5, 0xb9, 0x65, 0x0d, 0xf4, 0x04, 0x1e, 0x95, 0xe2, 0xfe, 0xab, 0x31, 0x9e, 0x3a, 0xc3, 0xfc,
0x33, 0xb3, 0x59, 0x39, 0x5c, 0x18, 0xde, 0x5a, 0x3f, 0x63, 0xe8, 0x9c, 0x39, 0x53, 0xa6, 0xac,
0xbd, 0x7e, 0xc6, 0x69, 0x1f, 0xbf, 0xea, 0x9f, 0x3a, 0xa6, 0x7e, 0xf4, 0x53, 0x0d, 0x8c, 0xa2,
0x59, 0xb3, 0x0c, 0x39, 0x6f, 0x9d, 0x8b, 0xe9, 0xed, 0x0c, 0x3d, 0x86, 0x87, 0x0a, 0xc2, 0x34,
0x15, 0xf6, 0x6b, 0xc8, 0x86, 0x8f, 0x37, 0x83, 0xb9, 0xd5, 0x66, 0x0d, 0x75, 0xe1, 0x41, 0x85,
0xf3, 0x66, 0xda, 0xe7, 0x58, 0x1d, 0x3d, 0x82, 0x83, 0x0a, 0x26, 0xdd, 0x69, 0xa0, 0xcf, 0xa1,
0x57, 0x81, 0xa4, 0xed, 0xee, 0x60, 0x7c, 0x76, 0xe6, 0x0c, 0x18, 0xab, 0x59, 0x51, 0x2e, 0xd3,
0x89, 0x45, 0x40, 0xd6, 0x95, 0x33, 0x4c, 0x2a, 0x6f, 0xb3, 0x00, 0x2b, 0x90, 0xa8, 0xaa, 0xd1,
0xf9, 0x44, 0x98, 0xac, 0xa3, 0x8f, 0xc0, 0xba, 0x05, 0x63, 0xe7, 0x7c, 0xfc, 0xd6, 0x19, 0x9a,
0xc6, 0xf1, 0x9f, 0x1b, 0x60, 0x4c, 0x56, 0x97, 0x61, 0x30, 0xeb, 0x4f, 0x46, 0xe8, 0x5b, 0x68,
0xcb, 0x39, 0x83, 0x1e, 0x96, 0x83, 0x7d, 0x6d, 0x16, 0x75, 0xad, 0xdb, 0x80, 0xb8, 0x3c, 0xf6,
0x3d, 0xd4, 0x07, 0x3d, 0x1f, 0x0c, 0xa8, 0xe4, 0x55, 0x86, 0x50, 0xf7, 0xd1, 0x06, 0xa4, 0x50,
0x71, 0x0a, 0x50, 0x0e, 0x00, 0xd4, 0x55, 0xe6, 0x5a, 0x65, 0x74, 0x74, 0x1f, 0x6f, 0xc4, 0x54,
0x45, 0x65, 0xdb, 0x56, 0x14, 0xdd, 0x1a, 0x14, 0x8a, 0xa2, 0xdb, 0x7d, 0xde, 0xbe, 0x87, 0xce,
0x61, 0x4b, 0x6d, 0xcd, 0xe8, 0xa3, 0xea, 0xb9, 0x6a, 0x63, 0xef, 0x3e, 0xb9, 0x03, 0x2d, 0xd4,
0x8d, 0x61, 0x4b, 0x6d, 0xb3, 0x8a, 0xba, 0x0d, 0x2d, 0x5c, 0x51, 0xb7, 0xa9, 0x37, 0xdb, 0xf7,
0x7e, 0xa1, 0xa1, 0xdf, 0xf0, 0xa4, 0xb1, 0x36, 0xb6, 0x9e, 0x34, 0xa5, 0x11, 0xaf, 0x27, 0x4d,
0xed, 0x78, 0x4c, 0xc3, 0x65, 0x8b, 0xff, 0xd5, 0xf3, 0xf5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff,
0x12, 0x12, 0x68, 0x59, 0xf7, 0x11, 0x00, 0x00,
}
| vendor/github.com/coreos/rkt/api/v1alpha/api.pb.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.013776998035609722,
0.00027088026399724185,
0.0001590756291989237,
0.00016802651225589216,
0.0010227462043985724
] |
{
"id": 2,
"code_window": [
"\t\"k8s.io/kubernetes/federation/pkg/kubefed\"\n",
"\t_ \"k8s.io/kubernetes/pkg/client/metrics/prometheus\" // for client metric registration\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/util/logs\"\n",
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n"
],
"labels": [
"keep",
"keep",
"keep",
"add",
"keep",
"keep",
"keep"
],
"after_edit": [
"\t\"k8s.io/kubernetes/pkg/version\"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 25
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by lister-gen
package v1
import (
"k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/kubernetes/pkg/apis/authorization/v1"
)
// SubjectAccessReviewLister helps list SubjectAccessReviews.
type SubjectAccessReviewLister interface {
// List lists all SubjectAccessReviews in the indexer.
List(selector labels.Selector) (ret []*v1.SubjectAccessReview, err error)
// Get retrieves the SubjectAccessReview from the index for a given name.
Get(name string) (*v1.SubjectAccessReview, error)
SubjectAccessReviewListerExpansion
}
// subjectAccessReviewLister implements the SubjectAccessReviewLister interface.
type subjectAccessReviewLister struct {
indexer cache.Indexer
}
// NewSubjectAccessReviewLister returns a new SubjectAccessReviewLister.
func NewSubjectAccessReviewLister(indexer cache.Indexer) SubjectAccessReviewLister {
return &subjectAccessReviewLister{indexer: indexer}
}
// List lists all SubjectAccessReviews in the indexer.
func (s *subjectAccessReviewLister) List(selector labels.Selector) (ret []*v1.SubjectAccessReview, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.SubjectAccessReview))
})
return ret, err
}
// Get retrieves the SubjectAccessReview from the index for a given name.
func (s *subjectAccessReviewLister) Get(name string) (*v1.SubjectAccessReview, error) {
key := &v1.SubjectAccessReview{ObjectMeta: meta_v1.ObjectMeta{Name: name}}
obj, exists, err := s.indexer.Get(key)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("subjectaccessreview"), name)
}
return obj.(*v1.SubjectAccessReview), nil
}
| pkg/client/listers/authorization/v1/subjectaccessreview.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0005095089436508715,
0.00022071320563554764,
0.00016083080845419317,
0.00017483050760347396,
0.00011810081196017563
] |
{
"id": 3,
"code_window": [
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n",
"func Run() error {\n",
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const hyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 28
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(madhusdancs):
// 1. Make printSuccess prepend protocol/scheme to the IPs/hostnames.
// 2. Separate etcd container from API server pod as a first step towards enabling HA.
// 3. Make API server and controller manager replicas customizable via the HA work.
package init
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"sort"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
triple "k8s.io/client-go/util/cert/triple"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/federation/apis/federation"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac"
client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/gcfg.v1"
)
const (
APIServerCN = "federation-apiserver"
ControllerManagerCN = "federation-controller-manager"
AdminCN = "admin"
HostClusterLocalDNSZoneName = "cluster.local."
APIServerNameSuffix = "apiserver"
CMNameSuffix = "controller-manager"
CredentialSuffix = "credentials"
KubeconfigNameSuffix = "kubeconfig"
// User name used by federation controller manager to make
// calls to federation API server.
ControllerManagerUser = "federation-controller-manager"
// Name of the ServiceAccount used by the federation controller manager
// to access the secrets in the host cluster.
ControllerManagerSA = "federation-controller-manager"
// Group name of the legacy/core API group
legacyAPIGroup = ""
lbAddrRetryInterval = 5 * time.Second
podWaitInterval = 2 * time.Second
apiserverServiceTypeFlag = "api-server-service-type"
apiserverAdvertiseAddressFlag = "api-server-advertise-address"
dnsProviderSecretName = "federation-dns-provider.conf"
apiServerSecurePortName = "https"
	// Set the secure port to 8443 to avoid requiring root privileges
	// to bind to a port < 1024. The apiserver's service will still
	// be exposed on port 443.
apiServerSecurePort = 8443
)
var (
init_long = templates.LongDesc(`
Initialize a federation control plane.
Federation control plane is hosted inside a Kubernetes
cluster. The host cluster must be specified using the
--host-cluster-context flag.`)
init_example = templates.Examples(`
# Initialize federation control plane for a federation
# named foo in the host cluster whose local kubeconfig
# context is bar.
kubefed init foo --host-cluster-context=bar`)
componentLabel = map[string]string{
"app": "federated-cluster",
}
apiserverSvcSelector = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
apiserverPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
controllerManagerPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-controller-manager",
}
hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
)
type initFederation struct {
commonOptions util.SubcommandOptions
options initFederationOptions
}
type initFederationOptions struct {
dnsZoneName string
image string
dnsProvider string
dnsProviderConfig string
etcdPVCapacity string
etcdPersistentStorage bool
dryRun bool
apiServerOverridesString string
apiServerOverrides map[string]string
controllerManagerOverridesString string
controllerManagerOverrides map[string]string
apiServerServiceTypeString string
apiServerServiceType v1.ServiceType
apiServerAdvertiseAddress string
apiServerEnableHTTPBasicAuth bool
apiServerEnableTokenAuth bool
}
func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {
defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())
flags.StringVar(&o.dnsZoneName, "dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
flags.StringVar(&o.image, "image", defaultImage, "Image to use for federation API server and controller manager binaries.")
flags.StringVar(&o.dnsProvider, "dns-provider", "", "Dns provider to be used for this deployment.")
flags.StringVar(&o.dnsProviderConfig, "dns-provider-config", "", "Config file path on local file system for configuring DNS provider.")
flags.StringVar(&o.etcdPVCapacity, "etcd-pv-capacity", "10Gi", "Size of persistent volume claim to be used for etcd.")
flags.BoolVar(&o.etcdPersistentStorage, "etcd-persistent-storage", true, "Use persistent volume for etcd. Defaults to 'true'.")
flags.BoolVar(&o.dryRun, "dry-run", false, "dry run without sending commands to server.")
flags.StringVar(&o.apiServerOverridesString, "apiserver-arg-overrides", "", "comma separated list of federation-apiserver arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.controllerManagerOverridesString, "controllermanager-arg-overrides", "", "comma separated list of federation-controller-manager arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.apiServerServiceTypeString, apiserverServiceTypeFlag, string(v1.ServiceTypeLoadBalancer), "The type of service to create for federation API server. Options: 'LoadBalancer' (default), 'NodePort'.")
flags.StringVar(&o.apiServerAdvertiseAddress, apiserverAdvertiseAddressFlag, "", "Preferred address to advertise api server nodeport service. Valid only if '"+apiserverServiceTypeFlag+"=NodePort'.")
flags.BoolVar(&o.apiServerEnableHTTPBasicAuth, "apiserver-enable-basic-auth", false, "Enables HTTP Basic authentication for the federation-apiserver. Defaults to false.")
flags.BoolVar(&o.apiServerEnableTokenAuth, "apiserver-enable-token-auth", false, "Enables token authentication for the federation-apiserver. Defaults to false.")
}
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
opts := &initFederation{}
cmd := &cobra.Command{
Use: "init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT",
Short: "init initializes a federation control plane",
Long: init_long,
Example: init_example,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(opts.Complete(cmd, args))
cmdutil.CheckErr(opts.Run(cmdOut, config))
},
}
flags := cmd.Flags()
opts.commonOptions.Bind(flags)
opts.options.Bind(flags)
return cmd
}
type entityKeyPairs struct {
ca *triple.KeyPair
server *triple.KeyPair
controllerManager *triple.KeyPair
admin *triple.KeyPair
}
type credentials struct {
username string
password string
token string
certEntKeyPairs *entityKeyPairs
}
// Complete ensures that options are valid and marshals them if necessary.
func (i *initFederation) Complete(cmd *cobra.Command, args []string) error {
if len(i.options.dnsProvider) == 0 {
return fmt.Errorf("--dns-provider is mandatory")
}
err := i.commonOptions.SetName(cmd, args)
if err != nil {
return err
}
i.options.apiServerServiceType = v1.ServiceType(i.options.apiServerServiceTypeString)
if i.options.apiServerServiceType != v1.ServiceTypeLoadBalancer && i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("invalid %s: %s, should be either %s or %s", apiserverServiceTypeFlag, i.options.apiServerServiceType, v1.ServiceTypeLoadBalancer, v1.ServiceTypeNodePort)
}
if i.options.apiServerAdvertiseAddress != "" {
ip := net.ParseIP(i.options.apiServerAdvertiseAddress)
if ip == nil {
return fmt.Errorf("invalid %s: %s, should be a valid ip address", apiserverAdvertiseAddressFlag, i.options.apiServerAdvertiseAddress)
}
if i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("%s should be passed only with '%s=NodePort'", apiserverAdvertiseAddressFlag, apiserverServiceTypeFlag)
}
}
i.options.apiServerOverrides, err = marshallOverrides(i.options.apiServerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --apiserver-arg-overrides: %v", err)
}
i.options.controllerManagerOverrides, err = marshallOverrides(i.options.controllerManagerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --controllermanager-arg-overrides: %v", err)
}
if i.options.dnsProviderConfig != "" {
if _, err := os.Stat(i.options.dnsProviderConfig); err != nil {
return fmt.Errorf("error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
return nil
}
// Run initializes a federation control plane.
// See the design doc in https://github.com/kubernetes/kubernetes/pull/34484
// for details.
func (i *initFederation) Run(cmdOut io.Writer, config util.AdminConfig) error {
hostFactory := config.ClusterFactory(i.commonOptions.Host, i.commonOptions.Kubeconfig)
hostClientset, err := hostFactory.ClientSet()
if err != nil {
return err
}
rbacAvailable := true
rbacVersionedClientset, err := util.GetVersionedClientForRBACOrFail(hostFactory)
if err != nil {
if _, ok := err.(*util.NoRBACAPIError); !ok {
return err
}
		// If the error is of type NoRBACAPIError, we continue to create the rest
		// of the resources, without the SA and roles (in the absence of RBAC support).
rbacAvailable = false
}
serverName := fmt.Sprintf("%s-%s", i.commonOptions.Name, APIServerNameSuffix)
serverCredName := fmt.Sprintf("%s-%s", serverName, CredentialSuffix)
cmName := fmt.Sprintf("%s-%s", i.commonOptions.Name, CMNameSuffix)
cmKubeconfigName := fmt.Sprintf("%s-%s", cmName, KubeconfigNameSuffix)
var dnsProviderConfigBytes []byte
if i.options.dnsProviderConfig != "" {
dnsProviderConfigBytes, err = ioutil.ReadFile(i.options.dnsProviderConfig)
if err != nil {
return fmt.Errorf("Error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
fmt.Fprintf(cmdOut, "Creating a namespace %s for federation system components...", i.commonOptions.FederationSystemNamespace)
glog.V(4).Infof("Creating a namespace %s for federation system components", i.commonOptions.FederationSystemNamespace)
_, err = createNamespace(hostClientset, i.commonOptions.Name, i.commonOptions.FederationSystemNamespace, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Creating federation control plane service...")
glog.V(4).Info("Creating federation control plane service")
svc, ips, hostnames, err := createService(cmdOut, hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.apiServerAdvertiseAddress, i.options.apiServerServiceType, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Infof("Created service named %s with IP addresses %v, hostnames %v", svc.Name, ips, hostnames)
fmt.Fprint(cmdOut, "Creating federation control plane objects (credentials, persistent volume claim)...")
glog.V(4).Info("Generating TLS certificates and credentials for communicating with the federation API server")
credentials, err := generateCredentials(i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, HostClusterLocalDNSZoneName, serverCredName, ips, hostnames, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.dryRun)
if err != nil {
return err
}
// Create the secret containing the credentials.
_, err = createAPIServerCredentialsSecret(hostClientset, i.commonOptions.FederationSystemNamespace, serverCredName, i.commonOptions.Name, credentials, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Certificates and credentials generated")
glog.V(4).Info("Creating an entry in the kubeconfig file with the certificate and credential data")
_, err = createControllerManagerKubeconfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmKubeconfigName, credentials.certEntKeyPairs, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Credentials secret successfully created")
glog.V(4).Info("Creating a persistent volume and a claim to store the federation API server's state, including etcd data")
var pvc *api.PersistentVolumeClaim
if i.options.etcdPersistentStorage {
pvc, err = createPVC(hostClientset, i.commonOptions.FederationSystemNamespace, svc.Name, i.commonOptions.Name, i.options.etcdPVCapacity, i.options.dryRun)
if err != nil {
return err
}
}
glog.V(4).Info("Persistent volume and claim created")
fmt.Fprintln(cmdOut, " done")
	// Since only one IP address can be specified as the advertise address,
	// we arbitrarily pick the first available IP address, preferring the
	// user-provided apiserverAdvertiseAddress over any other addresses.
advertiseAddress := i.options.apiServerAdvertiseAddress
if advertiseAddress == "" && len(ips) > 0 {
advertiseAddress = ips[0]
}
fmt.Fprint(cmdOut, "Creating federation component deployments...")
glog.V(4).Info("Creating federation control plane components")
_, err = createAPIServer(hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.image, advertiseAddress, serverCredName, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.apiServerOverrides, pvc, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation API server")
sa := &api.ServiceAccount{}
sa.Name = ""
// Create a service account and related RBAC roles if the host cluster has RBAC support.
// TODO: We must evaluate creating a separate service account even when RBAC support is missing
if rbacAvailable {
glog.V(4).Info("Creating service account for federation controller manager in the host cluster")
sa, err = createControllerManagerSA(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager service account")
glog.V(4).Info("Creating RBAC role and role bindings for the federation controller manager's service account")
_, _, err = createRoleBindings(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, sa.Name, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created RBAC role and role bindings")
}
glog.V(4).Info("Creating a DNS provider config secret")
dnsProviderSecret, err := createDNSProviderConfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, dnsProviderSecretName, i.commonOptions.Name, dnsProviderConfigBytes, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created DNS provider config secret")
glog.V(4).Info("Creating federation controller manager deployment")
_, err = createControllerManager(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmName, i.options.image, cmKubeconfigName, i.options.dnsZoneName, i.options.dnsProvider, i.options.dnsProviderConfig, sa.Name, dnsProviderSecret, i.options.controllerManagerOverrides, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager deployment")
	fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Updating kubeconfig...")
glog.V(4).Info("Updating kubeconfig")
	// Pick the first IP/hostname to update the API server endpoint in kubeconfig and to report back to the user.
	// In the case of a NodePort service for the API server, the IPs are the nodes' external IPs.
endpoint := ""
if len(ips) > 0 {
endpoint = ips[0]
} else if len(hostnames) > 0 {
endpoint = hostnames[0]
}
	// If the service is of type NodePort, we need to append the port to the endpoint since it is a non-standard port.
if i.options.apiServerServiceType == v1.ServiceTypeNodePort {
endpoint = endpoint + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
}
err = updateKubeconfig(config, i.commonOptions.Name, endpoint, i.commonOptions.Kubeconfig, credentials, i.options.dryRun)
if err != nil {
glog.V(4).Infof("Failed to update kubeconfig: %v", err)
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Info("Successfully updated kubeconfig")
if !i.options.dryRun {
fmt.Fprint(cmdOut, "Waiting for federation control plane to come up...")
glog.V(4).Info("Waiting for federation control plane to come up")
fedPods := []string{serverName, cmName}
err = waitForPods(cmdOut, hostClientset, fedPods, i.commonOptions.FederationSystemNamespace)
if err != nil {
return err
}
err = waitSrvHealthy(cmdOut, config, i.commonOptions.Name, i.commonOptions.Kubeconfig)
if err != nil {
return err
}
glog.V(4).Info("Federation control plane running")
fmt.Fprintln(cmdOut, " done")
return printSuccess(cmdOut, ips, hostnames, svc)
}
_, err = fmt.Fprintln(cmdOut, "Federation control plane runs (dry run)")
glog.V(4).Info("Federation control plane runs (dry run)")
return err
}
func createNamespace(clientset client.Interface, federationName, namespace string, dryRun bool) (*api.Namespace, error) {
ns := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return ns, nil
}
return clientset.Core().Namespaces().Create(ns)
}
func createService(cmdOut io.Writer, clientset client.Interface, namespace, svcName, federationName, apiserverAdvertiseAddress string, apiserverServiceType v1.ServiceType, dryRun bool) (*api.Service, []string, []string, error) {
svc := &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.ServiceSpec{
Type: api.ServiceType(apiserverServiceType),
Selector: apiserverSvcSelector,
Ports: []api.ServicePort{
{
Name: "https",
Protocol: "TCP",
Port: 443,
TargetPort: intstr.FromString(apiServerSecurePortName),
},
},
},
}
if dryRun {
return svc, nil, nil, nil
}
var err error
svc, err = clientset.Core().Services(namespace).Create(svc)
ips := []string{}
hostnames := []string{}
if apiserverServiceType == v1.ServiceTypeLoadBalancer {
ips, hostnames, err = waitForLoadBalancerAddress(cmdOut, clientset, svc, dryRun)
} else {
if apiserverAdvertiseAddress != "" {
ips = append(ips, apiserverAdvertiseAddress)
} else {
ips, err = getClusterNodeIPs(clientset)
}
}
if err != nil {
return svc, nil, nil, err
}
return svc, ips, hostnames, err
}
func getClusterNodeIPs(clientset client.Interface) ([]string, error) {
preferredAddressTypes := []api.NodeAddressType{
api.NodeExternalIP,
}
nodeList, err := clientset.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
nodeAddresses := []string{}
for _, node := range nodeList.Items {
OuterLoop:
for _, addressType := range preferredAddressTypes {
for _, address := range node.Status.Addresses {
if address.Type == addressType {
nodeAddresses = append(nodeAddresses, address.Address)
break OuterLoop
}
}
}
}
return nodeAddresses, nil
}
func waitForLoadBalancerAddress(cmdOut io.Writer, clientset client.Interface, svc *api.Service, dryRun bool) ([]string, []string, error) {
ips := []string{}
hostnames := []string{}
if dryRun {
return ips, hostnames, nil
}
err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
if ings := pollSvc.Status.LoadBalancer.Ingress; len(ings) > 0 {
for _, ing := range ings {
if len(ing.IP) > 0 {
ips = append(ips, ing.IP)
}
if len(ing.Hostname) > 0 {
hostnames = append(hostnames, ing.Hostname)
}
}
if len(ips) > 0 || len(hostnames) > 0 {
return true, nil
}
}
return false, nil
})
if err != nil {
return nil, nil, err
}
return ips, hostnames, nil
}
func generateCredentials(svcNamespace, name, svcName, localDNSZoneName, serverCredName string, ips, hostnames []string, enableHTTPBasicAuth, enableTokenAuth, dryRun bool) (*credentials, error) {
credentials := credentials{
username: AdminCN,
}
if enableHTTPBasicAuth {
credentials.password = string(uuid.NewUUID())
}
if enableTokenAuth {
credentials.token = string(uuid.NewUUID())
}
entKeyPairs, err := genCerts(svcNamespace, name, svcName, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, err
}
credentials.certEntKeyPairs = entKeyPairs
return &credentials, nil
}
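// genCerts creates a self-signed CA for the federation and uses it to issue
// a serving key pair for the API server (covering the given service name,
// DNS zone, IPs and hostnames) plus client key pairs for the controller
// manager and the admin user.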
func genCerts(svcNamespace, name, svcName, localDNSZoneName string, ips, hostnames []string) (*entityKeyPairs, error) {
ca, err := triple.NewCA(name)
if err != nil {
return nil, fmt.Errorf("failed to create CA key and certificate: %v", err)
}
server, err := triple.NewServerKeyPair(ca, APIServerCN, svcName, svcNamespace, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, fmt.Errorf("failed to create federation API server key and certificate: %v", err)
}
cm, err := triple.NewClientKeyPair(ca, ControllerManagerCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create federation controller manager client key and certificate: %v", err)
}
admin, err := triple.NewClientKeyPair(ca, AdminCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create client key and certificate for an admin: %v", err)
}
return &entityKeyPairs{
ca: ca,
server: server,
controllerManager: cm,
admin: admin,
}, nil
}
func createAPIServerCredentialsSecret(clientset client.Interface, namespace, credentialsName, federationName string, credentials *credentials, dryRun bool) (*api.Secret, error) {
// Build the secret object with API server credentials.
data := map[string][]byte{
"ca.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert),
"server.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.server.Cert),
"server.key": certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.server.Key),
}
if credentials.password != "" {
data["basicauth.csv"] = authFileContents(credentials.username, credentials.password)
}
if credentials.token != "" {
data["token.csv"] = authFileContents(credentials.username, credentials.token)
}
secret := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: credentialsName,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: data,
}
if dryRun {
return secret, nil
}
// Boilerplate to create the secret in the host cluster.
return clientset.Core().Secrets(namespace).Create(secret)
}
func createControllerManagerKubeconfigSecret(clientset client.Interface, namespace, name, svcName, kubeconfigName string, entKeyPairs *entityKeyPairs, dryRun bool) (*api.Secret, error) {
config := kubeconfigutil.CreateWithCerts(
fmt.Sprintf("https://%s", svcName),
name,
ControllerManagerUser,
certutil.EncodeCertPEM(entKeyPairs.ca.Cert),
certutil.EncodePrivateKeyPEM(entKeyPairs.controllerManager.Key),
certutil.EncodeCertPEM(entKeyPairs.controllerManager.Cert),
)
return util.CreateKubeconfigSecret(clientset, config, namespace, kubeconfigName, name, "", dryRun)
}
func createPVC(clientset client.Interface, namespace, svcName, federationName, etcdPVCapacity string, dryRun bool) (*api.PersistentVolumeClaim, error) {
capacity, err := resource.ParseQuantity(etcdPVCapacity)
if err != nil {
return nil, err
}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-etcd-claim", svcName),
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "yes",
federation.FederationNameAnnotation: federationName},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceStorage: capacity,
},
},
},
}
if dryRun {
return pvc, nil
}
return clientset.Core().PersistentVolumeClaims(namespace).Create(pvc)
}
func createAPIServer(clientset client.Interface, namespace, name, federationName, image, advertiseAddress, credentialsName string, hasHTTPBasicAuthFile, hasTokenAuthFile bool, argOverrides map[string]string, pvc *api.PersistentVolumeClaim, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-apiserver",
}
argsMap := map[string]string{
"--bind-address": "0.0.0.0",
"--etcd-servers": "http://localhost:2379",
"--secure-port": fmt.Sprintf("%d", apiServerSecurePort),
"--client-ca-file": "/etc/federation/apiserver/ca.crt",
"--tls-cert-file": "/etc/federation/apiserver/server.crt",
"--tls-private-key-file": "/etc/federation/apiserver/server.key",
"--admission-control": "NamespaceLifecycle",
}
if advertiseAddress != "" {
argsMap["--advertise-address"] = advertiseAddress
}
if hasHTTPBasicAuthFile {
argsMap["--basic-auth-file"] = "/etc/federation/apiserver/basicauth.csv"
}
if hasTokenAuthFile {
argsMap["--token-auth-file"] = "/etc/federation/apiserver/token.csv"
}
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: apiserverPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "apiserver",
Image: image,
Command: command,
Ports: []api.ContainerPort{
{
Name: apiServerSecurePortName,
ContainerPort: apiServerSecurePort,
},
{
Name: "local",
ContainerPort: 8080,
},
},
VolumeMounts: []api.VolumeMount{
{
Name: credentialsName,
MountPath: "/etc/federation/apiserver",
ReadOnly: true,
},
},
},
{
Name: "etcd",
Image: "gcr.io/google_containers/etcd:3.0.17",
Command: []string{
"/usr/local/bin/etcd",
"--data-dir",
"/var/etcd/data",
},
},
},
Volumes: []api.Volume{
{
Name: credentialsName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: credentialsName,
},
},
},
},
},
},
},
}
if pvc != nil {
dataVolumeName := "etcddata"
etcdVolume := api.Volume{
Name: dataVolumeName,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}
etcdVolumeMount := api.VolumeMount{
Name: dataVolumeName,
MountPath: "/var/etcd",
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, etcdVolume)
for i, container := range dep.Spec.Template.Spec.Containers {
if container.Name == "etcd" {
dep.Spec.Template.Spec.Containers[i].VolumeMounts = append(dep.Spec.Template.Spec.Containers[i].VolumeMounts, etcdVolumeMount)
}
}
}
if dryRun {
return dep, nil
}
createdDep, err := clientset.Extensions().Deployments(namespace).Create(dep)
return createdDep, err
}
func createControllerManagerSA(clientset client.Interface, namespace, federationName string, dryRun bool) (*api.ServiceAccount, error) {
sa := &api.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: ControllerManagerSA,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return sa, nil
}
return clientset.Core().ServiceAccounts(namespace).Create(sa)
}
func createRoleBindings(clientset client.Interface, namespace, saName, federationName string, dryRun bool) (*rbac.Role, *rbac.RoleBinding, error) {
roleName := "federation-system:federation-controller-manager"
role := &rbac.Role{
// a role to use for bootstrapping the federation-controller-manager so it can access
// secrets in the host cluster to access other clusters.
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch").Groups(legacyAPIGroup).Resources("secrets").RuleOrDie(),
},
}
rolebinding, err := rbac.NewRoleBinding(roleName, namespace).SAs(namespace, saName).Binding()
if err != nil {
return nil, nil, err
}
rolebinding.Labels = componentLabel
rolebinding.Annotations = map[string]string{federation.FederationNameAnnotation: federationName}
if dryRun {
return role, &rolebinding, nil
}
newRole, err := clientset.Rbac().Roles(namespace).Create(role)
if err != nil {
return nil, nil, err
}
newRolebinding, err := clientset.Rbac().RoleBindings(namespace).Create(&rolebinding)
return newRole, newRolebinding, err
}
func createControllerManager(clientset client.Interface, namespace, name, svcName, cmName, image, kubeconfigName, dnsZoneName, dnsProvider, dnsProviderConfig, saName string, dnsProviderSecret *api.Secret, argOverrides map[string]string, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-controller-manager",
}
argsMap := map[string]string{
"--kubeconfig": "/etc/federation/controller-manager/kubeconfig",
}
argsMap["--master"] = fmt.Sprintf("https://%s", svcName)
argsMap["--dns-provider"] = dnsProvider
argsMap["--federation-name"] = name
argsMap["--zone-name"] = dnsZoneName
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Namespace: namespace,
Labels: componentLabel,
// We additionally update the details (in annotations) about the
// kube-dns config map which needs to be created in the clusters
// registering to this federation (at kubefed join).
			// We won't otherwise have this information available at kubefed join.
Annotations: map[string]string{
// TODO: the name/domain name pair should ideally be checked for naming convention
// as done in kube-dns federation flags check.
// https://github.com/kubernetes/dns/blob/master/pkg/dns/federation/federation.go
// TODO v2: Until kube-dns can handle trailing periods we strip them all.
// See https://github.com/kubernetes/dns/issues/67
util.FedDomainMapKey: fmt.Sprintf("%s=%s", name, strings.TrimRight(dnsZoneName, ".")),
federation.FederationNameAnnotation: name,
},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Labels: controllerManagerPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "controller-manager",
Image: image,
Command: command,
VolumeMounts: []api.VolumeMount{
{
Name: kubeconfigName,
MountPath: "/etc/federation/controller-manager",
ReadOnly: true,
},
},
Env: []api.EnvVar{
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
},
},
Volumes: []api.Volume{
{
Name: kubeconfigName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: kubeconfigName,
},
},
},
},
},
},
},
}
if saName != "" {
dep.Spec.Template.Spec.ServiceAccountName = saName
}
if dnsProviderSecret != nil {
dep = addDNSProviderConfig(dep, dnsProviderSecret.Name)
if dnsProvider == util.FedDNSProviderCoreDNS {
var err error
dep, err = addCoreDNSServerAnnotation(dep, dnsZoneName, dnsProviderConfig)
if err != nil {
return nil, err
}
}
}
if dryRun {
return dep, nil
}
return clientset.Extensions().Deployments(namespace).Create(dep)
}
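// marshallOverrides parses a comma-separated list of "--flag=value" pairs
// into a map keyed by flag name. For example (illustrative values):
//
//	"--v=4,--kube-api-qps=50" -> {"--v": "4", "--kube-api-qps": "50"}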
func marshallOverrides(overrideArgString string) (map[string]string, error) {
if overrideArgString == "" {
return nil, nil
}
argsMap := make(map[string]string)
overrideArgs := strings.Split(overrideArgString, ",")
for _, overrideArg := range overrideArgs {
splitArg := strings.SplitN(overrideArg, "=", 2)
if len(splitArg) != 2 {
return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg)
}
key := strings.TrimSpace(splitArg[0])
val := strings.TrimSpace(splitArg[1])
if len(key) == 0 {
return nil, fmt.Errorf("wrong format for override arg: %s, arg name cannot be empty", overrideArg)
}
argsMap[key] = val
}
return argsMap, nil
}
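// argMapsToArgStrings merges the overrides into argsMap (overrides win) and
// renders the result as a sorted slice of "key=value" strings, for example
// (illustrative values): {"--v": "4"} -> ["--v=4"].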
func argMapsToArgStrings(argsMap, overrides map[string]string) []string {
for key, val := range overrides {
argsMap[key] = val
}
args := []string{}
for key, value := range argsMap {
args = append(args, fmt.Sprintf("%s=%s", key, value))
}
// This is needed for the unit test deep copy to get an exact match
sort.Strings(args)
return args
}
func waitForPods(cmdOut io.Writer, clientset client.Interface, fedPods []string, namespace string) error {
err := wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
podCheck := len(fedPods)
podList, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return false, nil
}
for _, pod := range podList.Items {
for _, fedPod := range fedPods {
if strings.HasPrefix(pod.Name, fedPod) && pod.Status.Phase == "Running" {
					podCheck--
}
}
			// Ensure that all pods are in the Running state, or keep waiting.
if podCheck == 0 {
return true, nil
}
}
return false, nil
})
return err
}
func waitSrvHealthy(cmdOut io.Writer, config util.AdminConfig, context, kubeconfig string) error {
fedClientSet, err := config.FederationClientset(context, kubeconfig)
if err != nil {
return err
}
fedDiscoveryClient := fedClientSet.Discovery()
err = wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
body, err := fedDiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err != nil {
return false, nil
}
if strings.EqualFold(string(body), "ok") {
return true, nil
}
return false, nil
})
return err
}
func printSuccess(cmdOut io.Writer, ips, hostnames []string, svc *api.Service) error {
svcEndpoints := append(ips, hostnames...)
endpoints := strings.Join(svcEndpoints, ", ")
if svc.Spec.Type == api.ServiceTypeNodePort {
endpoints = ips[0] + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
if len(ips) > 1 {
endpoints = endpoints + ", ..."
}
}
_, err := fmt.Fprintf(cmdOut, "Federation API server is running at: %s\n", endpoints)
return err
}
func updateKubeconfig(config util.AdminConfig, name, endpoint, kubeConfigPath string, credentials *credentials, dryRun bool) error {
po := config.PathOptions()
po.LoadingRules.ExplicitPath = kubeConfigPath
kubeconfig, err := po.GetStartingConfig()
if err != nil {
return err
}
// Populate API server endpoint info.
cluster := clientcmdapi.NewCluster()
// Prefix "https" as the URL scheme to endpoint.
if !strings.HasPrefix(endpoint, "https://") {
endpoint = fmt.Sprintf("https://%s", endpoint)
}
cluster.Server = endpoint
cluster.CertificateAuthorityData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert)
// Populate credentials.
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.admin.Cert)
authInfo.ClientKeyData = certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.admin.Key)
authInfo.Token = credentials.token
var httpBasicAuthInfo *clientcmdapi.AuthInfo
if credentials.password != "" {
httpBasicAuthInfo = clientcmdapi.NewAuthInfo()
httpBasicAuthInfo.Password = credentials.password
httpBasicAuthInfo.Username = credentials.username
}
// Populate context.
context := clientcmdapi.NewContext()
context.Cluster = name
context.AuthInfo = name
// Update the config struct with API server endpoint info,
// credentials and context.
kubeconfig.Clusters[name] = cluster
kubeconfig.AuthInfos[name] = authInfo
if httpBasicAuthInfo != nil {
kubeconfig.AuthInfos[fmt.Sprintf("%s-basic-auth", name)] = httpBasicAuthInfo
}
kubeconfig.Contexts[name] = context
if !dryRun {
// Write the update kubeconfig.
if err := clientcmd.ModifyConfig(po, *kubeconfig, true); err != nil {
return err
}
}
return nil
}
func createDNSProviderConfigSecret(clientset client.Interface, namespace, name, federationName string, dnsProviderConfigBytes []byte, dryRun bool) (*api.Secret, error) {
if dnsProviderConfigBytes == nil {
return nil, nil
}
secretSpec := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: map[string][]byte{
name: dnsProviderConfigBytes,
},
}
var secret *api.Secret
var err error
if !dryRun {
secret, err = clientset.Core().Secrets(namespace).Create(secretSpec)
if err != nil {
return nil, err
}
}
return secret, nil
}
func addDNSProviderConfig(dep *extensions.Deployment, secretName string) *extensions.Deployment {
const (
dnsProviderConfigVolume = "config-volume"
dnsProviderConfigMountPath = "/etc/federation/dns-provider"
)
// Create a volume from dns-provider secret
volume := api.Volume{
Name: dnsProviderConfigVolume,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: secretName,
},
},
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, volume)
// Mount dns-provider secret volume to controller-manager container
volumeMount := api.VolumeMount{
Name: dnsProviderConfigVolume,
MountPath: dnsProviderConfigMountPath,
ReadOnly: true,
}
dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(dep.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
dep.Spec.Template.Spec.Containers[0].Command = append(dep.Spec.Template.Spec.Containers[0].Command, fmt.Sprintf("--dns-provider-config=%s/%s", dnsProviderConfigMountPath, secretName))
return dep
}
// authFileContents returns a CSV string containing the contents of an
// authentication file in the format required by the federation-apiserver.
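// For example (illustrative values), authFileContents("admin", "s3cr3t")
// yields "s3cr3t,admin,<generated-uuid>\n", i.e. the secret, the user name,
// and a generated UUID used as the user ID.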
func authFileContents(username, authSecret string) []byte {
return []byte(fmt.Sprintf("%s,%s,%s\n", authSecret, username, uuid.NewUUID()))
}
func addCoreDNSServerAnnotation(deployment *extensions.Deployment, dnsZoneName, dnsProviderConfig string) (*extensions.Deployment, error) {
var cfg coredns.Config
if err := gcfg.ReadFileInto(&cfg, dnsProviderConfig); err != nil {
return nil, err
}
deployment.Annotations[util.FedDNSZoneName] = dnsZoneName
deployment.Annotations[util.FedNameServer] = cfg.Global.CoreDNSEndpoints
deployment.Annotations[util.FedDNSProvider] = util.FedDNSProviderCoreDNS
return deployment, nil
}
| federation/pkg/kubefed/init/init.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.9957233667373657,
0.11582378298044205,
0.00016525080718565732,
0.00018067775818053633,
0.30071592330932617
] |
{
"id": 3,
"code_window": [
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n",
"func Run() error {\n",
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const hyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 28
} | #!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script performs disaster recovery of etcd from the backup data.
# Assumptions:
# - backup was done using etcdctl command:
# a) in case of etcd2
# $ etcdctl backup --data-dir=<dir>
# produced .snap and .wal files
# b) in case of etcd3
# $ etcdctl --endpoints=<address> snapshot save
# produced .db file
# - version.txt file is in the current directory (if it isn't, it will be
# defaulted to "2.2.1/etcd2"). Based on this file, the script will
# decide to which version we are restoring (procedures are different
# for etcd2 and etcd3).
# - in case of etcd2 - *.snap and *.wal files are in current directory
# - in case of etcd3 - *.db file is in the current directory
# - the script is run as root
# - for event etcd, we only support clearing it - to do it, you need to
# set RESET_EVENT_ETCD=true env var.
set -o errexit
set -o nounset
set -o pipefail
# Version file contains information about current version in the format:
# <etcd binary version>/<etcd api mode> (e.g. "3.0.12/etcd3").
#
# If the file doesn't exist we assume "2.2.1/etcd2" configuration is
# the current one and create a file with such configuration.
# The restore procedure is chosen based on this information.
VERSION_FILE="version.txt"
# Make it possible to overwrite version file (or default version)
# with VERSION_CONTENTS env var.
if [ -n "${VERSION_CONTENTS:-}" ]; then
echo "${VERSION_CONTENTS}" > "${VERSION_FILE}"
fi
if [ ! -f "${VERSION_FILE}" ]; then
echo "2.2.1/etcd2" > "${VERSION_FILE}"
fi
VERSION_CONTENTS="$(cat ${VERSION_FILE})"
ETCD_VERSION="$(echo $VERSION_CONTENTS | cut -d '/' -f 1)"
ETCD_API="$(echo $VERSION_CONTENTS | cut -d '/' -f 2)"
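# Illustrative example: VERSION_CONTENTS="3.0.17/etcd3" yields
# ETCD_VERSION="3.0.17" and ETCD_API="etcd3".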
# Name is used only in etcd3 mode, to appropriately set the metadata
# for the etcd data.
# NOTE: NAME HAS TO BE EQUAL TO WHAT WE USE IN --name flag when starting etcd.
NAME="${NAME:-etcd-$(hostname)}"
# Port on which etcd is exposed.
etcd_port=2379
event_etcd_port=4002
# Wait until the etcd instance exposed on the given port is up.
wait_for_etcd_up() {
port=$1
# TODO: As of 3.0.x etcd versions, all 2.* and 3.* versions return
# {"health": "true"} on /health endpoint in healthy case.
# However, we should come up with a regex for it to avoid future breakage.
health_ok="{\"health\": \"true\"}"
for i in $(seq 120); do
# TODO: Is it enough to look into /health endpoint?
health=$(curl --silent http://127.0.0.1:${port}/health)
if [ "${health}" == "${health_ok}" ]; then
return 0
fi
sleep 1
done
return 1
}
# Wait until the apiserver is up and the cluster reports healthy.
wait_for_cluster_healthy() {
for i in $(seq 120); do
cs_status=$(kubectl get componentstatuses -o template --template='{{range .items}}{{with index .conditions 0}}{{.type}}:{{.status}}{{end}}{{"\n"}}{{end}}') || true
componentstatuses=$(echo "${cs_status}" | grep -c 'Healthy:') || true
healthy=$(echo "${cs_status}" | grep -c 'Healthy:True') || true
if [ "${componentstatuses}" -eq "${healthy}" ]; then
return 0
fi
sleep 1
done
return 1
}
# Wait until etcd and apiserver pods are down.
wait_for_etcd_and_apiserver_down() {
for i in $(seq 120); do
etcd=$(docker ps | grep etcd | grep -v etcd-empty-dir | grep -v etcd-monitor | wc -l)
apiserver=$(docker ps | grep apiserver | wc -l)
# TODO: Theoretically it is possible that the apiserver and/or etcd
# are currently down, but Kubelet is now restarting them and they
# will reappear again. We should avoid it.
if [ "${etcd}" -eq "0" -a "${apiserver}" -eq "0" ]; then
return 0
fi
sleep 1
done
return 1
}
# Move the manifest files to stop etcd and kube-apiserver
# while we swap the data out from under them.
MANIFEST_DIR="/etc/kubernetes/manifests"
MANIFEST_BACKUP_DIR="/etc/kubernetes/manifests-backups"
mkdir -p "${MANIFEST_BACKUP_DIR}"
echo "Moving etcd(s) & apiserver manifest files to ${MANIFEST_BACKUP_DIR}"
# If those files were already moved (e.g. during a previous
# restore attempt), don't fail on it.
mv "${MANIFEST_DIR}/kube-apiserver.manifest" "${MANIFEST_BACKUP_DIR}" || true
mv "${MANIFEST_DIR}/etcd.manifest" "${MANIFEST_BACKUP_DIR}" || true
mv "${MANIFEST_DIR}/etcd-events.manifest" "${MANIFEST_BACKUP_DIR}" || true
# Wait for the pods to be stopped
echo "Waiting for etcd and kube-apiserver to be down"
if ! wait_for_etcd_and_apiserver_down; then
# Couldn't kill etcd and apiserver.
echo "Downing etcd and apiserver failed"
exit 1
fi
# Create the sort of directory structure that etcd expects.
# If this directory already exists, remove it.
BACKUP_DIR="/var/tmp/backup"
rm -rf "${BACKUP_DIR}"
if [ "${ETCD_API}" == "etcd2" ]; then
echo "Preparing etcd backup data for restore"
# In v2 mode, we simply copy both snap and wal files to a newly created
# directory. After that, we start etcd with the --force-new-cluster option,
# which (according to the etcd documentation) is required to recover from
# a backup.
echo "Copying data to ${BACKUP_DIR} and restoring there"
mkdir -p "${BACKUP_DIR}/member/snap"
mkdir -p "${BACKUP_DIR}/member/wal"
# If the cluster is relatively new, there can be no .snap file.
mv *.snap "${BACKUP_DIR}/member/snap/" || true
mv *.wal "${BACKUP_DIR}/member/wal/"
# TODO(jsz): This won't work with HA setups (e.g. do we need to set the --name flag)?
echo "Starting etcd ${ETCD_VERSION} to restore data"
image=$(docker run -d -v ${BACKUP_DIR}:/var/etcd/data \
--net=host -p ${etcd_port}:${etcd_port} \
"gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcd --data-dir /var/etcd/data --force-new-cluster")
if [ "$?" -ne "0" ]; then
echo "Docker container didn't started correctly"
exit 1
fi
echo "Container ${image} created, waiting for etcd to report as healthy"
if ! wait_for_etcd_up "${etcd_port}"; then
echo "Etcd didn't come back correctly"
exit 1
fi
# Kill that etcd instance.
echo "Etcd healthy - killing ${image} container"
docker kill "${image}"
elif [ "${ETCD_API}" == "etcd3" ]; then
echo "Preparing etcd snapshot for restore"
mkdir -p "${BACKUP_DIR}"
echo "Copying data to ${BACKUP_DIR} and restoring there"
number_files=$(find . -maxdepth 1 -type f -name "*.db" | wc -l)
if [ "${number_files}" -ne "1" ]; then
echo "Incorrect number of *.db files - expected 1"
exit 1
fi
mv *.db "${BACKUP_DIR}/"
snapshot="$(ls ${BACKUP_DIR})"
# Run the etcdctl snapshot restore command and wait until it finishes.
# The --name flag below has to match the --name setting in the etcd
# manifest file; with that it seems to work.
# TODO(jsz): This command may not work in case of HA.
image=$(docker run -d -v ${BACKUP_DIR}:/var/tmp/backup --env ETCDCTL_API=3 \
"gcr.io/google_containers/etcd:${ETCD_VERSION}" /bin/sh -c \
"/usr/local/bin/etcdctl snapshot restore ${BACKUP_DIR}/${snapshot} --name ${NAME} --initial-cluster ${NAME}=http://localhost:2380; mv /${NAME}.etcd/member /var/tmp/backup/")
if [ "$?" -ne "0" ]; then
echo "Docker container didn't started correctly"
exit 1
fi
echo "Prepare container exit code: $(docker wait ${image})"
rm -f "${BACKUP_DIR}/${snapshot}"
fi
# Also copy version.txt file.
cp "${VERSION_FILE}" "${BACKUP_DIR}"
# Find out if we are running GCI vs CVM.
export CVM=$(curl "http://metadata/computeMetadata/v1/instance/attributes/" -H "Metadata-Flavor: Google" |& grep -q gci; echo $?)
if [[ "$CVM" == "1" ]]; then
export MNT_DISK="/mnt/master-pd"
else
export MNT_DISK="/mnt/disks/master-pd"
fi
# Save the corrupted data (clean directory if it is already non-empty).
rm -rf "${MNT_DISK}/var/etcd-corrupted"
mkdir -p "${MNT_DISK}/var/etcd-corrupted"
echo "Saving corrupted data to ${MNT_DISK}/var/etcd-corrupted"
mv /var/etcd/data "${MNT_DISK}/var/etcd-corrupted"
# Replace the corrupted data dir with the restored data.
echo "Copying restored data to /var/etcd/data"
mv "${BACKUP_DIR}" /var/etcd/data
if [ "${RESET_EVENT_ETCD:-}" == "true" ]; then
echo "Removing event-etcd corrupted data"
EVENTS_CORRUPTED_DIR="${MNT_DISK}/var/etcd-events-corrupted"
# Save the corrupted data (clean directory if it is already non-empty).
rm -rf "${EVENTS_CORRUPTED_DIR}"
mkdir -p "${EVENTS_CORRUPTED_DIR}"
mv /var/etcd/data-events "${EVENTS_CORRUPTED_DIR}"
fi
# Start etcd and kube-apiserver again.
echo "Restarting etcd and apiserver from restored snapshot"
mv "${MANIFEST_BACKUP_DIR}"/* "${MANIFEST_DIR}/"
rm -rf "${MANIFEST_BACKUP_DIR}"
# Verify that etcd is back.
echo "Waiting for etcd to come back"
if ! wait_for_etcd_up "${etcd_port}"; then
echo "Etcd didn't come back correctly"
exit 1
fi
# Verify that event etcd is back.
echo "Waiting for event etcd to come back"
if ! wait_for_etcd_up "${event_etcd_port}"; then
echo "Event etcd didn't come back correctly"
exit 1
fi
# Verify that kube-apiserver is back and cluster is healthy.
echo "Waiting for apiserver to come back"
if ! wait_for_cluster_healthy; then
echo "Apiserver didn't come back correctly"
exit 1
fi
echo "Cluster successfully restored!"
| cluster/restore-from-backup.sh | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.008166090585291386,
0.0008202497265301645,
0.00016685598529875278,
0.00017068709712475538,
0.0020261637400835752
] |
{
"id": 3,
"code_window": [
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n",
"func Run() error {\n",
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const hyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 28
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testserver
import (
"fmt"
"net"
"os"
"strconv"
"time"
"github.com/pborman/uuid"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/dynamic"
extensionsapiserver "k8s.io/kube-apiextensions-server/pkg/apiserver"
"k8s.io/kube-apiextensions-server/pkg/client/clientset/clientset"
"k8s.io/kube-apiextensions-server/pkg/cmd/server"
)
func DefaultServerConfig() (*extensionsapiserver.Config, error) {
port, err := FindFreeLocalPort()
if err != nil {
return nil, err
}
options := server.NewCustomResourceDefinitionsServerOptions(os.Stdout, os.Stderr)
options.RecommendedOptions.Audit.Path = "-"
options.RecommendedOptions.SecureServing.BindPort = port
options.RecommendedOptions.Authentication.SkipInClusterLookup = true
options.RecommendedOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1")
etcdURL, ok := os.LookupEnv("KUBE_INTEGRATION_ETCD_URL")
if !ok {
etcdURL = "http://127.0.0.1:2379"
}
options.RecommendedOptions.Etcd.StorageConfig.ServerList = []string{etcdURL}
options.RecommendedOptions.Etcd.StorageConfig.Prefix = uuid.New()
// TODO: stop copying this. There isn't currently a way to disable
// authentication or authorization from options, so we explode
// options.Config here.
genericConfig := genericapiserver.NewConfig(extensionsapiserver.Codecs)
genericConfig.Authenticator = nil
genericConfig.Authorizer = authorizerfactory.NewAlwaysAllowAuthorizer()
if err := options.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
}
if err := options.RecommendedOptions.Etcd.ApplyTo(genericConfig); err != nil {
return nil, err
}
if err := options.RecommendedOptions.SecureServing.ApplyTo(genericConfig); err != nil {
return nil, err
}
if err := options.RecommendedOptions.Audit.ApplyTo(genericConfig); err != nil {
return nil, err
}
if err := options.RecommendedOptions.Features.ApplyTo(genericConfig); err != nil {
return nil, err
}
customResourceDefinitionRESTOptionsGetter := extensionsapiserver.CustomResourceDefinitionRESTOptionsGetter{
StorageConfig: options.RecommendedOptions.Etcd.StorageConfig,
StoragePrefix: options.RecommendedOptions.Etcd.StorageConfig.Prefix,
EnableWatchCache: options.RecommendedOptions.Etcd.EnableWatchCache,
DefaultWatchCacheSize: options.RecommendedOptions.Etcd.DefaultWatchCacheSize,
EnableGarbageCollection: options.RecommendedOptions.Etcd.EnableGarbageCollection,
DeleteCollectionWorkers: options.RecommendedOptions.Etcd.DeleteCollectionWorkers,
}
customResourceDefinitionRESTOptionsGetter.StorageConfig.Codec = unstructured.UnstructuredJSONScheme
customResourceDefinitionRESTOptionsGetter.StorageConfig.Copier = extensionsapiserver.UnstructuredCopier{}
config := &extensionsapiserver.Config{
GenericConfig: genericConfig,
CustomResourceDefinitionRESTOptionsGetter: customResourceDefinitionRESTOptionsGetter,
}
return config, nil
}
func StartServer(config *extensionsapiserver.Config) (chan struct{}, clientset.Interface, dynamic.ClientPool, error) {
stopCh := make(chan struct{})
server, err := config.Complete().New(genericapiserver.EmptyDelegate)
if err != nil {
return nil, nil, nil, err
}
go func() {
err := server.GenericAPIServer.PrepareRun().Run(stopCh)
if err != nil {
close(stopCh)
panic(err)
}
}()
// wait until the server is healthy
err = wait.PollImmediate(30*time.Millisecond, 30*time.Second, func() (bool, error) {
healthClient, err := clientset.NewForConfig(server.GenericAPIServer.LoopbackClientConfig)
if err != nil {
return false, nil
}
healthResult := healthClient.Discovery().RESTClient().Get().AbsPath("/healthz").Do()
if healthResult.Error() != nil {
return false, nil
}
rawHealth, err := healthResult.Raw()
if err != nil {
return false, nil
}
if string(rawHealth) != "ok" {
return false, nil
}
return true, nil
})
if err != nil {
close(stopCh)
return nil, nil, nil, err
}
apiExtensionsClient, err := clientset.NewForConfig(server.GenericAPIServer.LoopbackClientConfig)
if err != nil {
close(stopCh)
return nil, nil, nil, err
}
bytes, _ := apiExtensionsClient.Discovery().RESTClient().Get().AbsPath("/apis/apiextensions.k8s.io/v1alpha1").DoRaw()
fmt.Print(string(bytes))
return stopCh, apiExtensionsClient, dynamic.NewDynamicClientPool(server.GenericAPIServer.LoopbackClientConfig), nil
}
func StartDefaultServer() (chan struct{}, clientset.Interface, dynamic.ClientPool, error) {
config, err := DefaultServerConfig()
if err != nil {
return nil, nil, nil, err
}
return StartServer(config)
}
// FindFreeLocalPort returns an available port number on
// the loopback interface. Useful for determining the port to launch
// a server on. Error handling required - there is a non-zero chance
// that the returned port number will be bound by another process
// after this function returns.
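// Illustrative usage (sketch): port, err := FindFreeLocalPort(); on success,
// bind "127.0.0.1:<port>" promptly to narrow the race window noted above.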
func FindFreeLocalPort() (int, error) {
l, err := net.Listen("tcp", ":0")
if err != nil {
return 0, err
}
defer l.Close()
_, portStr, err := net.SplitHostPort(l.Addr().String())
if err != nil {
return 0, err
}
port, err := strconv.Atoi(portStr)
if err != nil {
return 0, err
}
return port, nil
}
| staging/src/k8s.io/kube-apiextensions-server/test/integration/testserver/start.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.996878981590271,
0.05615438520908356,
0.0001663503353483975,
0.00017477678193245083,
0.22816333174705505
] |
{
"id": 3,
"code_window": [
"\t_ \"k8s.io/kubernetes/pkg/version/prometheus\" // for version metric registration\n",
")\n",
"\n",
"func Run() error {\n",
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n"
],
"labels": [
"keep",
"keep",
"add",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"const hyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
"\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "add",
"edit_start_line_idx": 28
} | package dns
import (
"errors"
"net"
"strconv"
)
const hexDigit = "0123456789abcdef"
// Everything is assumed in ClassINET.
// SetReply creates a reply message from a request message.
func (dns *Msg) SetReply(request *Msg) *Msg {
dns.Id = request.Id
dns.RecursionDesired = request.RecursionDesired // Copy rd bit
dns.Response = true
dns.Opcode = OpcodeQuery
dns.Rcode = RcodeSuccess
if len(request.Question) > 0 {
dns.Question = make([]Question, 1)
dns.Question[0] = request.Question[0]
}
return dns
}
// SetQuestion creates a question message, it sets the Question
// section, generates an Id and sets the RecursionDesired (RD)
// bit to true.
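// Illustrative usage: m := new(Msg); m.SetQuestion(Fqdn("example.org"), TypeA)
// prepares an A query for "example.org." with a fresh Id and the RD bit set.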
func (dns *Msg) SetQuestion(z string, t uint16) *Msg {
dns.Id = Id()
dns.RecursionDesired = true
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, t, ClassINET}
return dns
}
// SetNotify creates a notify message, it sets the Question
// section, generates an Id and sets the Authoritative (AA)
// bit to true.
func (dns *Msg) SetNotify(z string) *Msg {
dns.Opcode = OpcodeNotify
dns.Authoritative = true
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetRcode creates an error message suitable for the request.
func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
dns.SetReply(request)
dns.Rcode = rcode
return dns
}
// SetRcodeFormatError creates a message with FormError set.
func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
dns.Rcode = RcodeFormatError
dns.Opcode = OpcodeQuery
dns.Response = true
dns.Authoritative = false
dns.Id = request.Id
return dns
}
// SetUpdate makes the message a dynamic update message. It
// sets the ZONE section to: z, TypeSOA, ClassINET.
func (dns *Msg) SetUpdate(z string) *Msg {
dns.Id = Id()
dns.Response = false
dns.Opcode = OpcodeUpdate
dns.Compress = false // BIND9 cannot handle compression
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeSOA, ClassINET}
return dns
}
// SetIxfr creates a message for requesting an IXFR.
func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Ns = make([]RR, 1)
s := new(SOA)
s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
s.Serial = serial
s.Ns = ns
s.Mbox = mbox
dns.Question[0] = Question{z, TypeIXFR, ClassINET}
dns.Ns[0] = s
return dns
}
// SetAxfr creates a message for requesting an AXFR.
func (dns *Msg) SetAxfr(z string) *Msg {
dns.Id = Id()
dns.Question = make([]Question, 1)
dns.Question[0] = Question{z, TypeAXFR, ClassINET}
return dns
}
// SetTsig appends a TSIG RR to the message.
// This is only a skeleton TSIG RR that is added as the last RR in the
// additional section. The TSIG is calculated when the message is being sent.
func (dns *Msg) SetTsig(z, algo string, fudge, timesigned int64) *Msg {
t := new(TSIG)
t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
t.Algorithm = algo
t.Fudge = 300
t.TimeSigned = uint64(timesigned)
t.OrigId = dns.Id
dns.Extra = append(dns.Extra, t)
return dns
}
// SetEdns0 appends an EDNS0 OPT RR to the message.
// TSIG should always be the last RR in a message.
func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
e := new(OPT)
e.Hdr.Name = "."
e.Hdr.Rrtype = TypeOPT
e.SetUDPSize(udpsize)
if do {
e.SetDo()
}
dns.Extra = append(dns.Extra, e)
return dns
}
// IsTsig checks if the message has a TSIG record as the last record
// in the additional section. It returns the TSIG record found or nil.
func (dns *Msg) IsTsig() *TSIG {
if len(dns.Extra) > 0 {
if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
return dns.Extra[len(dns.Extra)-1].(*TSIG)
}
}
return nil
}
// IsEdns0 checks if the message has an EDNS0 (OPT) record, any EDNS0
// record in the additional section will do. It returns the OPT record
// found or nil.
func (dns *Msg) IsEdns0() *OPT {
// EDNS0 is at the end of the additional section, start there.
// We might want to change this to *only* look at the last two
// records. So we see TSIG and/or OPT - this is a slightly bigger
// change though.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT)
}
}
return nil
}
// IsDomainName checks if s is a valid domain name. It returns the number of
// labels and true when the domain name is valid. Note that a non-fully-qualified
// domain name is considered valid; in that case the last label is counted in
// the number of labels. When false is returned, the number of labels is not
// defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name, as DNS is an 8-bit protocol. It checks if each
// label fits in 63 characters, but there is no length check for the entire
// string s. I.e. a domain name longer than 255 characters is considered valid.
func IsDomainName(s string) (labels int, ok bool) {
_, labels, err := packDomainName(s, nil, 0, nil, false)
return labels, err == nil
}
// IsSubDomain checks if child is indeed a child of the parent. If child and parent
// are the same domain, true is returned as well.
func IsSubDomain(parent, child string) bool {
// Entire child is contained in parent
return CompareDomainName(parent, child) == CountLabel(parent)
}
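// Illustrative examples: IsSubDomain("miek.nl.", "www.miek.nl.") is true,
// and IsSubDomain("miek.nl.", "miek.nl.") is true as well (same domain).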
// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
// The checking is performed on the binary payload.
func IsMsg(buf []byte) error {
// Header
if len(buf) < 12 {
return errors.New("dns: bad message header")
}
// Header: Opcode
// TODO(miek): more checks here, e.g. check all header bits.
return nil
}
// IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool {
l := len(s)
if l == 0 {
return false
}
return s[l-1] == '.'
}
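// Illustrative examples: IsFqdn("example.org.") is true; IsFqdn("example.org") is false.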
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
// This means the RRs need to have the same type, name, and class. Returns true
// if the RR set is valid, otherwise false.
func IsRRset(rrset []RR) bool {
if len(rrset) == 0 {
return false
}
if len(rrset) == 1 {
return true
}
rrHeader := rrset[0].Header()
rrType := rrHeader.Rrtype
rrClass := rrHeader.Class
rrName := rrHeader.Name
for _, rr := range rrset[1:] {
curRRHeader := rr.Header()
if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
// Mismatch between the records, so this is not a valid rrset for
//signing/verifying
return false
}
}
return true
}
// Fqdn returns the fully qualified domain name from s.
// If s is already fully qualified, it behaves as the identity function.
func Fqdn(s string) string {
if IsFqdn(s) {
return s
}
return s + "."
}
// Copied from the official Go code.
// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
// address suitable for reverse DNS (PTR) record lookups or an error if it fails
// to parse the IP address.
func ReverseAddr(addr string) (arpa string, err error) {
ip := net.ParseIP(addr)
if ip == nil {
return "", &Error{err: "unrecognized address: " + addr}
}
if ip.To4() != nil {
return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
}
// Must be IPv6
buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
// Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- {
v := ip[i]
buf = append(buf, hexDigit[v&0xF])
buf = append(buf, '.')
buf = append(buf, hexDigit[v>>4])
buf = append(buf, '.')
}
// Append "ip6.arpa." and return (buf already has the final .)
buf = append(buf, "ip6.arpa."...)
return string(buf), nil
}
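// Illustrative example: ReverseAddr("10.1.2.3") returns "3.2.1.10.in-addr.arpa.";
// IPv6 addresses yield the nibble-reversed form under "ip6.arpa.".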
// String returns the string representation for the type t.
func (t Type) String() string {
if t1, ok := TypeToString[uint16(t)]; ok {
return t1
}
return "TYPE" + strconv.Itoa(int(t))
}
// String returns the string representation for the class c.
func (c Class) String() string {
if c1, ok := ClassToString[uint16(c)]; ok {
return c1
}
return "CLASS" + strconv.Itoa(int(c))
}
// String returns the string representation for the name n.
func (n Name) String() string {
return sprintName(string(n))
}
| vendor/github.com/miekg/dns/defaults.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00027609732933342457,
0.00018477209960110486,
0.0001644046133151278,
0.00017151919018942863,
0.000025999226636486128
] |
{
"id": 4,
"code_window": [
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)\n",
"\treturn cmd.Execute()\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr, defaultImage)\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "replace",
"edit_start_line_idx": 32
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubefed
import (
"io"
"k8s.io/apiserver/pkg/util/flag"
"k8s.io/client-go/tools/clientcmd"
kubefedinit "k8s.io/kubernetes/federation/pkg/kubefed/init"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
kubectl "k8s.io/kubernetes/pkg/kubectl/cmd"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/spf13/cobra"
)
// NewKubeFedCommand creates the `kubefed` command and its nested children.
func NewKubeFedCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cobra.Command {
// Parent command to which all subcommands are added.
cmds := &cobra.Command{
Use: "kubefed",
Short: "kubefed controls a Kubernetes Cluster Federation",
Long: templates.LongDesc(`
kubefed controls a Kubernetes Cluster Federation.
Find more information at https://github.com/kubernetes/kubernetes.`),
Run: runHelp,
}
f.BindFlags(cmds.PersistentFlags())
f.BindExternalFlags(cmds.PersistentFlags())
// From this point and forward we get warnings on flags that contain "_" separators
cmds.SetGlobalNormalizationFunc(flag.WarnWordSepNormalizeFunc)
groups := templates.CommandGroups{
{
Message: "Basic Commands:",
Commands: []*cobra.Command{
kubefedinit.NewCmdInit(out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdJoin(f, out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdUnjoin(f, out, err, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
},
},
}
groups.Add(cmds)
filters := []string{
"options",
}
templates.ActsAsRootCommand(cmds, filters, groups...)
cmds.AddCommand(kubectl.NewCmdVersion(f, out))
cmds.AddCommand(kubectl.NewCmdOptions())
return cmds
}
func runHelp(cmd *cobra.Command, args []string) {
cmd.Help()
}
| federation/pkg/kubefed/kubefed.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0073695434257388115,
0.0019879431929439306,
0.0001689759810687974,
0.00019078378682024777,
0.002731893677264452
] |
{
"id": 4,
"code_window": [
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)\n",
"\treturn cmd.Execute()\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr, defaultImage)\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "replace",
"edit_start_line_idx": 32
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
package policy // import "k8s.io/kubernetes/pkg/apis/policy"
| pkg/apis/policy/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017744439537636936,
0.00017633821698836982,
0.0001752320386003703,
0.00017633821698836982,
0.0000011061783879995346
] |
{
"id": 4,
"code_window": [
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)\n",
"\treturn cmd.Execute()\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr, defaultImage)\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "replace",
"edit_start_line_idx": 32
} | apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
| cluster/addons/storage-class/gce/default.yaml | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0001756721903802827,
0.00017299928003922105,
0.00017032638425007463,
0.00017299928003922105,
0.0000026729030651040375
] |
{
"id": 4,
"code_window": [
"\tlogs.InitLogs()\n",
"\tdefer logs.FlushLogs()\n",
"\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)\n",
"\treturn cmd.Execute()\n",
"}"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"keep",
"keep"
],
"after_edit": [
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\tcmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr, defaultImage)\n"
],
"file_path": "federation/cmd/kubefed/app/kubefed.go",
"type": "replace",
"edit_start_line_idx": 32
} |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| third_party/intemp/LICENSE | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017806148389354348,
0.00017429745639674366,
0.00017024276894517243,
0.00017444782133679837,
0.0000018764357037071022
] |
{
"id": 5,
"code_window": [
" \"//pkg/apis/extensions:go_default_library\",\n",
" \"//pkg/apis/rbac:go_default_library\",\n",
" \"//pkg/client/clientset_generated/internalclientset:go_default_library\",\n",
" \"//pkg/kubectl/cmd/templates:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/version:go_default_library\",\n",
" \"//vendor/github.com/golang/glog:go_default_library\",\n",
" \"//vendor/github.com/spf13/cobra:go_default_library\",\n",
" \"//vendor/github.com/spf13/pflag:go_default_library\",\n",
" \"//vendor/gopkg.in/gcfg.v1:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"os"
"k8s.io/kubernetes/federation/pkg/kubefed"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/logs"
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
func Run() error {
logs.InitLogs()
defer logs.FlushLogs()
cmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
return cmd.Execute()
}
| federation/cmd/kubefed/app/kubefed.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0004358253499958664,
0.00023856962798163295,
0.0001636933593545109,
0.0001773799303919077,
0.00011402289237594232
] |
{
"id": 5,
"code_window": [
" \"//pkg/apis/extensions:go_default_library\",\n",
" \"//pkg/apis/rbac:go_default_library\",\n",
" \"//pkg/client/clientset_generated/internalclientset:go_default_library\",\n",
" \"//pkg/kubectl/cmd/templates:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/version:go_default_library\",\n",
" \"//vendor/github.com/golang/glog:go_default_library\",\n",
" \"//vendor/github.com/spf13/cobra:go_default_library\",\n",
" \"//vendor/github.com/spf13/pflag:go_default_library\",\n",
" \"//vendor/gopkg.in/gcfg.v1:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"strings"
"time"
"github.com/golang/glog"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
utilexec "k8s.io/kubernetes/pkg/util/exec"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
// HostportSyncer takes a list of PodPortMappings and implements hostport all at once
type HostportSyncer interface {
// SyncHostports gathers all hostports on node and setup iptables rules to enable them.
// On each invocation existing ports are synced and stale rules are deleted.
SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error
// OpenPodHostportsAndSync opens hostports for a new PodPortMapping, gathers all hostports on
// node, sets up iptables rules enable them. On each invocation existing ports are synced and stale rules are deleted.
// 'newPortMapping' must also be present in 'activePodPortMappings'.
OpenPodHostportsAndSync(newPortMapping *PodPortMapping, natInterfaceName string, activePodPortMappings []*PodPortMapping) error
}
type hostportSyncer struct {
hostPortMap map[hostport]closeable
iptables utiliptables.Interface
portOpener hostportOpener
}
func NewHostportSyncer() HostportSyncer {
iptInterface := utiliptables.New(utilexec.New(), utildbus.New(), utiliptables.ProtocolIpv4)
return &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: iptInterface,
portOpener: openLocalPort,
}
}
type targetPod struct {
podFullName string
podIP string
}
func (hp *hostport) String() string {
return fmt.Sprintf("%s:%d", hp.protocol, hp.port)
}
// openHostports opens all hostports for the pod; successfully opened sockets are recorded in h.hostPortMap.
func (h *hostportSyncer) openHostports(podHostportMapping *PodPortMapping) error {
var retErr error
ports := make(map[hostport]closeable)
for _, port := range podHostportMapping.PortMappings {
if port.HostPort <= 0 {
// Assume hostport is not specified in this portmapping. So skip
continue
}
hp := hostport{
port: port.HostPort,
protocol: strings.ToLower(string(port.Protocol)),
}
socket, err := h.portOpener(&hp)
if err != nil {
retErr = fmt.Errorf("cannot open hostport %d for pod %s: %v", port.HostPort, getPodFullName(podHostportMapping), err)
break
}
ports[hp] = socket
}
// If encounter any error, close all hostports that just got opened.
if retErr != nil {
for hp, socket := range ports {
if err := socket.Close(); err != nil {
glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err)
}
}
return retErr
}
for hostPort, socket := range ports {
h.hostPortMap[hostPort] = socket
}
return nil
}
func getPodFullName(pod *PodPortMapping) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format), while allowed in the container name format.
return pod.Name + "_" + pod.Namespace
}
// gatherAllHostports returns all hostports that should be presented on the node,
// given the list of pods running on that node and ignoring host network
// pods (which don't need hostport <-> container port mapping).
func gatherAllHostports(activePodPortMappings []*PodPortMapping) (map[*PortMapping]targetPod, error) {
podHostportMap := make(map[*PortMapping]targetPod)
for _, pm := range activePodPortMappings {
if pm.IP.To4() == nil {
return nil, fmt.Errorf("Invalid or missing pod %s IP", getPodFullName(pm))
}
// should not handle hostports for hostnetwork pods
if pm.HostNetwork {
continue
}
for _, port := range pm.PortMappings {
if port.HostPort != 0 {
podHostportMap[port] = targetPod{podFullName: getPodFullName(pm), podIP: pm.IP.String()}
}
}
}
return podHostportMap, nil
}
// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
buf.WriteString(strings.Join(words, " ") + "\n")
}
// hostportChainName takes the containerPort for a pod and returns the
// associated iptables chain name. This is computed by hashing (sha256) the
// host port, protocol, and pod full name, then encoding to base32 and
// truncating, prefixed with kubeHostportChainPrefix. We do this because
// iptables chain names must be <= 28 chars long, and the longer they are
// the harder they are to read.
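// Illustrative example (hypothetical hash): pod "nginx_default" with host
// port 8080/tcp might map to a chain like kubeHostportChainPrefix + "ABCDEF2GHIJ3KLMN".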
func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain {
hash := sha256.Sum256([]byte(string(pm.HostPort) + string(pm.Protocol) + podFullName))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
// OpenPodHostportsAndSync opens hostports for a new PodPortMapping, gathers all hostports on
// node, sets up iptables rules enable them. And finally clean up stale hostports.
// 'newPortMapping' must also be present in 'activePodPortMappings'.
func (h *hostportSyncer) OpenPodHostportsAndSync(newPortMapping *PodPortMapping, natInterfaceName string, activePodPortMappings []*PodPortMapping) error {
// try to open pod host port if specified
if err := h.openHostports(newPortMapping); err != nil {
return err
}
// Add the new pod to active pods if it's not present.
var found bool
for _, pm := range activePodPortMappings {
if pm.Namespace == newPortMapping.Namespace && pm.Name == newPortMapping.Name {
found = true
break
}
}
if !found {
activePodPortMappings = append(activePodPortMappings, newPortMapping)
}
return h.SyncHostports(natInterfaceName, activePodPortMappings)
}
// SyncHostports gathers all hostports on the node, sets up iptables rules to enable them, and finally cleans up stale hostports.
func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error {
start := time.Now()
defer func() {
glog.V(4).Infof("syncHostportsRules took %v", time.Since(start))
}()
hostportPodMap, err := gatherAllHostports(activePodPortMappings)
if err != nil {
return err
}
// Ensure KUBE-HOSTPORTS chains
ensureKubeHostportChains(h.iptables, natInterfaceName)
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingNATChains := make(map[utiliptables.Chain]string)
iptablesSaveRaw, err := h.iptables.Save(utiliptables.TableNAT)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesSaveRaw)
}
natChains := bytes.NewBuffer(nil)
natRules := bytes.NewBuffer(nil)
writeLine(natChains, "*nat")
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
if chain, ok := existingNATChains[kubeHostportsChain]; ok {
writeLine(natChains, chain)
} else {
writeLine(natChains, utiliptables.MakeChainLine(kubeHostportsChain))
}
// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
for port, target := range hostportPodMap {
protocol := strings.ToLower(string(port.Protocol))
hostportChain := hostportChainName(port, target.podFullName)
if chain, ok := existingNATChains[hostportChain]; ok {
writeLine(natChains, chain)
} else {
writeLine(natChains, utiliptables.MakeChainLine(hostportChain))
}
activeNATChains[hostportChain] = true
// Redirect to hostport chain
args := []string{
"-A", string(kubeHostportsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-m", protocol, "-p", protocol,
"--dport", fmt.Sprintf("%d", port.HostPort),
"-j", string(hostportChain),
}
writeLine(natRules, args...)
// Assuming kubelet is syncing iptables KUBE-MARK-MASQ chain
// If the request comes from the pod that is serving the hostport, then SNAT
args = []string{
"-A", string(hostportChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-s", target.podIP, "-j", string(iptablesproxy.KubeMarkMasqChain),
}
writeLine(natRules, args...)
// Create the hostport chain to DNAT traffic to its final destination.
// iptables will maintain the stats for this chain.
args = []string{
"-A", string(hostportChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-m", protocol, "-p", protocol,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", target.podIP, port.ContainerPort),
}
writeLine(natRules, args...)
}
// Delete chains no longer in use.
for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !strings.HasPrefix(chainString, kubeHostportChainPrefix) {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
writeLine(natChains, existingNATChains[chain])
writeLine(natRules, "-X", chainString)
}
}
writeLine(natRules, "COMMIT")
natLines := append(natChains.Bytes(), natRules.Bytes()...)
glog.V(3).Infof("Restoring iptables rules: %s", natLines)
err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
return fmt.Errorf("Failed to execute iptables-restore: %v", err)
}
h.cleanupHostportMap(hostportPodMap)
return nil
}
// cleanupHostportMap closes obsolete hostports
func (h *hostportSyncer) cleanupHostportMap(containerPortMap map[*PortMapping]targetPod) {
// compute hostports that are supposed to be open
currentHostports := make(map[hostport]bool)
for containerPort := range containerPortMap {
hp := hostport{
port: containerPort.HostPort,
protocol: strings.ToLower(string(containerPort.Protocol)),
}
currentHostports[hp] = true
}
// close and delete obsolete hostports
for hp, socket := range h.hostPortMap {
if _, ok := currentHostports[hp]; !ok {
socket.Close()
glog.V(3).Infof("Closed local port %s", hp.String())
delete(h.hostPortMap, hp)
}
}
}
| pkg/kubelet/network/hostport/hostport_syncer.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00036410431494005024,
0.00017960935656446964,
0.00015968990919645876,
0.00016857766604516655,
0.000037080335459904745
] |
{
"id": 5,
"code_window": [
" \"//pkg/apis/extensions:go_default_library\",\n",
" \"//pkg/apis/rbac:go_default_library\",\n",
" \"//pkg/client/clientset_generated/internalclientset:go_default_library\",\n",
" \"//pkg/kubectl/cmd/templates:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/version:go_default_library\",\n",
" \"//vendor/github.com/golang/glog:go_default_library\",\n",
" \"//vendor/github.com/spf13/cobra:go_default_library\",\n",
" \"//vendor/github.com/spf13/pflag:go_default_library\",\n",
" \"//vendor/gopkg.in/gcfg.v1:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | package models
type Statistic struct {
Name string `json:"name"`
Tags map[string]string `json:"tags"`
Values map[string]interface{} `json:"values"`
}
func NewStatistic(name string) Statistic {
return Statistic{
Name: name,
Tags: make(map[string]string),
Values: make(map[string]interface{}),
}
}
// StatisticTags is a map that can be merged with others without causing
// mutations to either map.
type StatisticTags map[string]string
// Merge creates a new map containing the merged contents of tags and t.
// If both tags and the receiver map contain the same key, the value in tags
// is used in the resulting map.
//
// Merge always returns a usable map.
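// Illustrative example: StatisticTags{"engine": "tsm1"}.Merge(
// map[string]string{"engine": "inmem", "path": "/db"}) yields
// map[string]string{"engine": "inmem", "path": "/db"}.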
func (t StatisticTags) Merge(tags map[string]string) map[string]string {
// Add everything in tags to the result.
out := make(map[string]string, len(tags))
for k, v := range tags {
out[k] = v
}
// Only add values from t that don't appear in tags.
for k, v := range t {
if _, ok := tags[k]; !ok {
out[k] = v
}
}
return out
}
| vendor/github.com/influxdata/influxdb/models/statistic.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0001717461709631607,
0.00016958403284661472,
0.00016452740237582475,
0.00017048917652573436,
0.0000026585505565890344
] |
{
"id": 5,
"code_window": [
" \"//pkg/apis/extensions:go_default_library\",\n",
" \"//pkg/apis/rbac:go_default_library\",\n",
" \"//pkg/client/clientset_generated/internalclientset:go_default_library\",\n",
" \"//pkg/kubectl/cmd/templates:go_default_library\",\n",
" \"//pkg/kubectl/cmd/util:go_default_library\",\n",
" \"//pkg/version:go_default_library\",\n",
" \"//vendor/github.com/golang/glog:go_default_library\",\n",
" \"//vendor/github.com/spf13/cobra:go_default_library\",\n",
" \"//vendor/github.com/spf13/pflag:go_default_library\",\n",
" \"//vendor/gopkg.in/gcfg.v1:go_default_library\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/BUILD",
"type": "replace",
"edit_start_line_idx": 26
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"direct.go",
"per_host.go",
"proxy.go",
"socks5.go",
],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| vendor/golang.org/x/net/proxy/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0003303142439108342,
0.0002192853862652555,
0.00017249753000214696,
0.00018716488557402045,
0.00006504030170617625
] |
{
"id": 6,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/rbac\"\n",
"\tclient \"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset\"\n",
"\t\"k8s.io/kubernetes/pkg/kubectl/cmd/templates\"\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/version\"\n",
"\n",
"\t\"github.com/golang/glog\"\n",
"\t\"github.com/spf13/cobra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 53
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(madhusdancs):
// 1. Make printSuccess prepend protocol/scheme to the IPs/hostnames.
// 2. Separate etcd container from API server pod as a first step towards enabling HA.
// 3. Make API server and controller manager replicas customizable via the HA work.
package init
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"sort"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
triple "k8s.io/client-go/util/cert/triple"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/federation/apis/federation"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac"
client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/gcfg.v1"
)
const (
APIServerCN = "federation-apiserver"
ControllerManagerCN = "federation-controller-manager"
AdminCN = "admin"
HostClusterLocalDNSZoneName = "cluster.local."
APIServerNameSuffix = "apiserver"
CMNameSuffix = "controller-manager"
CredentialSuffix = "credentials"
KubeconfigNameSuffix = "kubeconfig"
// User name used by federation controller manager to make
// calls to federation API server.
ControllerManagerUser = "federation-controller-manager"
// Name of the ServiceAccount used by the federation controller manager
// to access the secrets in the host cluster.
ControllerManagerSA = "federation-controller-manager"
// Group name of the legacy/core API group
legacyAPIGroup = ""
lbAddrRetryInterval = 5 * time.Second
podWaitInterval = 2 * time.Second
apiserverServiceTypeFlag = "api-server-service-type"
apiserverAdvertiseAddressFlag = "api-server-advertise-address"
dnsProviderSecretName = "federation-dns-provider.conf"
apiServerSecurePortName = "https"
// Set the secure port to 8443 to avoid requiring root privileges
// to bind to port < 1000. The apiserver's service will still
// expose on port 443.
apiServerSecurePort = 8443
)
var (
init_long = templates.LongDesc(`
Initialize a federation control plane.
Federation control plane is hosted inside a Kubernetes
cluster. The host cluster must be specified using the
--host-cluster-context flag.`)
init_example = templates.Examples(`
# Initialize federation control plane for a federation
# named foo in the host cluster whose local kubeconfig
# context is bar.
kubefed init foo --host-cluster-context=bar`)
componentLabel = map[string]string{
"app": "federated-cluster",
}
apiserverSvcSelector = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
apiserverPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
controllerManagerPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-controller-manager",
}
hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
)
type initFederation struct {
commonOptions util.SubcommandOptions
options initFederationOptions
}
type initFederationOptions struct {
dnsZoneName string
image string
dnsProvider string
dnsProviderConfig string
etcdPVCapacity string
etcdPersistentStorage bool
dryRun bool
apiServerOverridesString string
apiServerOverrides map[string]string
controllerManagerOverridesString string
controllerManagerOverrides map[string]string
apiServerServiceTypeString string
apiServerServiceType v1.ServiceType
apiServerAdvertiseAddress string
apiServerEnableHTTPBasicAuth bool
apiServerEnableTokenAuth bool
}
func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {
defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())
flags.StringVar(&o.dnsZoneName, "dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
flags.StringVar(&o.image, "image", defaultImage, "Image to use for federation API server and controller manager binaries.")
	flags.StringVar(&o.dnsProvider, "dns-provider", "", "DNS provider to be used for this deployment.")
flags.StringVar(&o.dnsProviderConfig, "dns-provider-config", "", "Config file path on local file system for configuring DNS provider.")
flags.StringVar(&o.etcdPVCapacity, "etcd-pv-capacity", "10Gi", "Size of persistent volume claim to be used for etcd.")
flags.BoolVar(&o.etcdPersistentStorage, "etcd-persistent-storage", true, "Use persistent volume for etcd. Defaults to 'true'.")
flags.BoolVar(&o.dryRun, "dry-run", false, "dry run without sending commands to server.")
flags.StringVar(&o.apiServerOverridesString, "apiserver-arg-overrides", "", "comma separated list of federation-apiserver arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.controllerManagerOverridesString, "controllermanager-arg-overrides", "", "comma separated list of federation-controller-manager arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.apiServerServiceTypeString, apiserverServiceTypeFlag, string(v1.ServiceTypeLoadBalancer), "The type of service to create for federation API server. Options: 'LoadBalancer' (default), 'NodePort'.")
flags.StringVar(&o.apiServerAdvertiseAddress, apiserverAdvertiseAddressFlag, "", "Preferred address to advertise api server nodeport service. Valid only if '"+apiserverServiceTypeFlag+"=NodePort'.")
flags.BoolVar(&o.apiServerEnableHTTPBasicAuth, "apiserver-enable-basic-auth", false, "Enables HTTP Basic authentication for the federation-apiserver. Defaults to false.")
flags.BoolVar(&o.apiServerEnableTokenAuth, "apiserver-enable-token-auth", false, "Enables token authentication for the federation-apiserver. Defaults to false.")
}
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
opts := &initFederation{}
cmd := &cobra.Command{
Use: "init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT",
Short: "init initializes a federation control plane",
Long: init_long,
Example: init_example,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(opts.Complete(cmd, args))
cmdutil.CheckErr(opts.Run(cmdOut, config))
},
}
flags := cmd.Flags()
opts.commonOptions.Bind(flags)
opts.options.Bind(flags)
return cmd
}
type entityKeyPairs struct {
ca *triple.KeyPair
server *triple.KeyPair
controllerManager *triple.KeyPair
admin *triple.KeyPair
}
type credentials struct {
username string
password string
token string
certEntKeyPairs *entityKeyPairs
}
// Complete ensures that options are valid and marshals them if necessary.
func (i *initFederation) Complete(cmd *cobra.Command, args []string) error {
if len(i.options.dnsProvider) == 0 {
return fmt.Errorf("--dns-provider is mandatory")
}
err := i.commonOptions.SetName(cmd, args)
if err != nil {
return err
}
i.options.apiServerServiceType = v1.ServiceType(i.options.apiServerServiceTypeString)
if i.options.apiServerServiceType != v1.ServiceTypeLoadBalancer && i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("invalid %s: %s, should be either %s or %s", apiserverServiceTypeFlag, i.options.apiServerServiceType, v1.ServiceTypeLoadBalancer, v1.ServiceTypeNodePort)
}
if i.options.apiServerAdvertiseAddress != "" {
ip := net.ParseIP(i.options.apiServerAdvertiseAddress)
if ip == nil {
return fmt.Errorf("invalid %s: %s, should be a valid ip address", apiserverAdvertiseAddressFlag, i.options.apiServerAdvertiseAddress)
}
if i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("%s should be passed only with '%s=NodePort'", apiserverAdvertiseAddressFlag, apiserverServiceTypeFlag)
}
}
i.options.apiServerOverrides, err = marshallOverrides(i.options.apiServerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --apiserver-arg-overrides: %v", err)
}
i.options.controllerManagerOverrides, err = marshallOverrides(i.options.controllerManagerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --controllermanager-arg-overrides: %v", err)
}
if i.options.dnsProviderConfig != "" {
if _, err := os.Stat(i.options.dnsProviderConfig); err != nil {
return fmt.Errorf("error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
return nil
}
// Run initializes a federation control plane.
// See the design doc in https://github.com/kubernetes/kubernetes/pull/34484
// for details.
func (i *initFederation) Run(cmdOut io.Writer, config util.AdminConfig) error {
hostFactory := config.ClusterFactory(i.commonOptions.Host, i.commonOptions.Kubeconfig)
hostClientset, err := hostFactory.ClientSet()
if err != nil {
return err
}
rbacAvailable := true
rbacVersionedClientset, err := util.GetVersionedClientForRBACOrFail(hostFactory)
if err != nil {
if _, ok := err.(*util.NoRBACAPIError); !ok {
return err
}
		// If the error is of type NoRBACAPIError, we continue to create the rest
		// of the resources, without the SA and roles (in the absence of RBAC support).
rbacAvailable = false
}
serverName := fmt.Sprintf("%s-%s", i.commonOptions.Name, APIServerNameSuffix)
serverCredName := fmt.Sprintf("%s-%s", serverName, CredentialSuffix)
cmName := fmt.Sprintf("%s-%s", i.commonOptions.Name, CMNameSuffix)
cmKubeconfigName := fmt.Sprintf("%s-%s", cmName, KubeconfigNameSuffix)
var dnsProviderConfigBytes []byte
if i.options.dnsProviderConfig != "" {
dnsProviderConfigBytes, err = ioutil.ReadFile(i.options.dnsProviderConfig)
if err != nil {
return fmt.Errorf("Error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
fmt.Fprintf(cmdOut, "Creating a namespace %s for federation system components...", i.commonOptions.FederationSystemNamespace)
glog.V(4).Infof("Creating a namespace %s for federation system components", i.commonOptions.FederationSystemNamespace)
_, err = createNamespace(hostClientset, i.commonOptions.Name, i.commonOptions.FederationSystemNamespace, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Creating federation control plane service...")
glog.V(4).Info("Creating federation control plane service")
svc, ips, hostnames, err := createService(cmdOut, hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.apiServerAdvertiseAddress, i.options.apiServerServiceType, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Infof("Created service named %s with IP addresses %v, hostnames %v", svc.Name, ips, hostnames)
fmt.Fprint(cmdOut, "Creating federation control plane objects (credentials, persistent volume claim)...")
glog.V(4).Info("Generating TLS certificates and credentials for communicating with the federation API server")
credentials, err := generateCredentials(i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, HostClusterLocalDNSZoneName, serverCredName, ips, hostnames, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.dryRun)
if err != nil {
return err
}
// Create the secret containing the credentials.
_, err = createAPIServerCredentialsSecret(hostClientset, i.commonOptions.FederationSystemNamespace, serverCredName, i.commonOptions.Name, credentials, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Certificates and credentials generated")
glog.V(4).Info("Creating an entry in the kubeconfig file with the certificate and credential data")
_, err = createControllerManagerKubeconfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmKubeconfigName, credentials.certEntKeyPairs, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Credentials secret successfully created")
glog.V(4).Info("Creating a persistent volume and a claim to store the federation API server's state, including etcd data")
var pvc *api.PersistentVolumeClaim
if i.options.etcdPersistentStorage {
pvc, err = createPVC(hostClientset, i.commonOptions.FederationSystemNamespace, svc.Name, i.commonOptions.Name, i.options.etcdPVCapacity, i.options.dryRun)
if err != nil {
return err
}
}
glog.V(4).Info("Persistent volume and claim created")
fmt.Fprintln(cmdOut, " done")
	// Since only one IP address can be specified as the advertise address, we
	// arbitrarily pick the first available one, preferring the user-provided
	// apiserverAdvertiseAddress over the others.
advertiseAddress := i.options.apiServerAdvertiseAddress
if advertiseAddress == "" && len(ips) > 0 {
advertiseAddress = ips[0]
}
fmt.Fprint(cmdOut, "Creating federation component deployments...")
glog.V(4).Info("Creating federation control plane components")
_, err = createAPIServer(hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.image, advertiseAddress, serverCredName, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.apiServerOverrides, pvc, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation API server")
sa := &api.ServiceAccount{}
sa.Name = ""
// Create a service account and related RBAC roles if the host cluster has RBAC support.
// TODO: We must evaluate creating a separate service account even when RBAC support is missing
if rbacAvailable {
glog.V(4).Info("Creating service account for federation controller manager in the host cluster")
sa, err = createControllerManagerSA(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager service account")
glog.V(4).Info("Creating RBAC role and role bindings for the federation controller manager's service account")
_, _, err = createRoleBindings(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, sa.Name, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created RBAC role and role bindings")
}
glog.V(4).Info("Creating a DNS provider config secret")
dnsProviderSecret, err := createDNSProviderConfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, dnsProviderSecretName, i.commonOptions.Name, dnsProviderConfigBytes, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created DNS provider config secret")
glog.V(4).Info("Creating federation controller manager deployment")
_, err = createControllerManager(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmName, i.options.image, cmKubeconfigName, i.options.dnsZoneName, i.options.dnsProvider, i.options.dnsProviderConfig, sa.Name, dnsProviderSecret, i.options.controllerManagerOverrides, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager deployment")
	fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Updating kubeconfig...")
glog.V(4).Info("Updating kubeconfig")
	// Pick the first IP/hostname to update the API server endpoint in the
	// kubeconfig, and to report back to the user.
	// In the case of a NodePort service for the API server, the IPs are node external IPs.
endpoint := ""
if len(ips) > 0 {
endpoint = ips[0]
} else if len(hostnames) > 0 {
endpoint = hostnames[0]
}
	// If the service is of type NodePort, append the port to the endpoint since it is a non-standard port.
if i.options.apiServerServiceType == v1.ServiceTypeNodePort {
endpoint = endpoint + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
}
err = updateKubeconfig(config, i.commonOptions.Name, endpoint, i.commonOptions.Kubeconfig, credentials, i.options.dryRun)
if err != nil {
glog.V(4).Infof("Failed to update kubeconfig: %v", err)
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Info("Successfully updated kubeconfig")
if !i.options.dryRun {
fmt.Fprint(cmdOut, "Waiting for federation control plane to come up...")
glog.V(4).Info("Waiting for federation control plane to come up")
fedPods := []string{serverName, cmName}
err = waitForPods(cmdOut, hostClientset, fedPods, i.commonOptions.FederationSystemNamespace)
if err != nil {
return err
}
err = waitSrvHealthy(cmdOut, config, i.commonOptions.Name, i.commonOptions.Kubeconfig)
if err != nil {
return err
}
glog.V(4).Info("Federation control plane running")
fmt.Fprintln(cmdOut, " done")
return printSuccess(cmdOut, ips, hostnames, svc)
}
_, err = fmt.Fprintln(cmdOut, "Federation control plane runs (dry run)")
glog.V(4).Info("Federation control plane runs (dry run)")
return err
}
func createNamespace(clientset client.Interface, federationName, namespace string, dryRun bool) (*api.Namespace, error) {
ns := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return ns, nil
}
return clientset.Core().Namespaces().Create(ns)
}
func createService(cmdOut io.Writer, clientset client.Interface, namespace, svcName, federationName, apiserverAdvertiseAddress string, apiserverServiceType v1.ServiceType, dryRun bool) (*api.Service, []string, []string, error) {
svc := &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.ServiceSpec{
Type: api.ServiceType(apiserverServiceType),
Selector: apiserverSvcSelector,
Ports: []api.ServicePort{
{
Name: "https",
Protocol: "TCP",
Port: 443,
TargetPort: intstr.FromString(apiServerSecurePortName),
},
},
},
}
if dryRun {
return svc, nil, nil, nil
}
var err error
svc, err = clientset.Core().Services(namespace).Create(svc)
ips := []string{}
hostnames := []string{}
if apiserverServiceType == v1.ServiceTypeLoadBalancer {
ips, hostnames, err = waitForLoadBalancerAddress(cmdOut, clientset, svc, dryRun)
} else {
if apiserverAdvertiseAddress != "" {
ips = append(ips, apiserverAdvertiseAddress)
} else {
ips, err = getClusterNodeIPs(clientset)
}
}
if err != nil {
return svc, nil, nil, err
}
return svc, ips, hostnames, err
}
func getClusterNodeIPs(clientset client.Interface) ([]string, error) {
preferredAddressTypes := []api.NodeAddressType{
api.NodeExternalIP,
}
nodeList, err := clientset.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
nodeAddresses := []string{}
for _, node := range nodeList.Items {
OuterLoop:
for _, addressType := range preferredAddressTypes {
for _, address := range node.Status.Addresses {
if address.Type == addressType {
nodeAddresses = append(nodeAddresses, address.Address)
break OuterLoop
}
}
}
}
return nodeAddresses, nil
}
func waitForLoadBalancerAddress(cmdOut io.Writer, clientset client.Interface, svc *api.Service, dryRun bool) ([]string, []string, error) {
ips := []string{}
hostnames := []string{}
if dryRun {
return ips, hostnames, nil
}
err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
if ings := pollSvc.Status.LoadBalancer.Ingress; len(ings) > 0 {
for _, ing := range ings {
if len(ing.IP) > 0 {
ips = append(ips, ing.IP)
}
if len(ing.Hostname) > 0 {
hostnames = append(hostnames, ing.Hostname)
}
}
if len(ips) > 0 || len(hostnames) > 0 {
return true, nil
}
}
return false, nil
})
if err != nil {
return nil, nil, err
}
return ips, hostnames, nil
}
func generateCredentials(svcNamespace, name, svcName, localDNSZoneName, serverCredName string, ips, hostnames []string, enableHTTPBasicAuth, enableTokenAuth, dryRun bool) (*credentials, error) {
credentials := credentials{
username: AdminCN,
}
if enableHTTPBasicAuth {
credentials.password = string(uuid.NewUUID())
}
if enableTokenAuth {
credentials.token = string(uuid.NewUUID())
}
entKeyPairs, err := genCerts(svcNamespace, name, svcName, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, err
}
credentials.certEntKeyPairs = entKeyPairs
return &credentials, nil
}
func genCerts(svcNamespace, name, svcName, localDNSZoneName string, ips, hostnames []string) (*entityKeyPairs, error) {
ca, err := triple.NewCA(name)
if err != nil {
return nil, fmt.Errorf("failed to create CA key and certificate: %v", err)
}
server, err := triple.NewServerKeyPair(ca, APIServerCN, svcName, svcNamespace, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, fmt.Errorf("failed to create federation API server key and certificate: %v", err)
}
cm, err := triple.NewClientKeyPair(ca, ControllerManagerCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create federation controller manager client key and certificate: %v", err)
}
admin, err := triple.NewClientKeyPair(ca, AdminCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create client key and certificate for an admin: %v", err)
}
return &entityKeyPairs{
ca: ca,
server: server,
controllerManager: cm,
admin: admin,
}, nil
}
func createAPIServerCredentialsSecret(clientset client.Interface, namespace, credentialsName, federationName string, credentials *credentials, dryRun bool) (*api.Secret, error) {
// Build the secret object with API server credentials.
data := map[string][]byte{
"ca.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert),
"server.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.server.Cert),
"server.key": certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.server.Key),
}
if credentials.password != "" {
data["basicauth.csv"] = authFileContents(credentials.username, credentials.password)
}
if credentials.token != "" {
data["token.csv"] = authFileContents(credentials.username, credentials.token)
}
secret := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: credentialsName,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: data,
}
if dryRun {
return secret, nil
}
// Boilerplate to create the secret in the host cluster.
return clientset.Core().Secrets(namespace).Create(secret)
}
func createControllerManagerKubeconfigSecret(clientset client.Interface, namespace, name, svcName, kubeconfigName string, entKeyPairs *entityKeyPairs, dryRun bool) (*api.Secret, error) {
config := kubeconfigutil.CreateWithCerts(
fmt.Sprintf("https://%s", svcName),
name,
ControllerManagerUser,
certutil.EncodeCertPEM(entKeyPairs.ca.Cert),
certutil.EncodePrivateKeyPEM(entKeyPairs.controllerManager.Key),
certutil.EncodeCertPEM(entKeyPairs.controllerManager.Cert),
)
return util.CreateKubeconfigSecret(clientset, config, namespace, kubeconfigName, name, "", dryRun)
}
func createPVC(clientset client.Interface, namespace, svcName, federationName, etcdPVCapacity string, dryRun bool) (*api.PersistentVolumeClaim, error) {
capacity, err := resource.ParseQuantity(etcdPVCapacity)
if err != nil {
return nil, err
}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-etcd-claim", svcName),
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "yes",
federation.FederationNameAnnotation: federationName},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceStorage: capacity,
},
},
},
}
if dryRun {
return pvc, nil
}
return clientset.Core().PersistentVolumeClaims(namespace).Create(pvc)
}
func createAPIServer(clientset client.Interface, namespace, name, federationName, image, advertiseAddress, credentialsName string, hasHTTPBasicAuthFile, hasTokenAuthFile bool, argOverrides map[string]string, pvc *api.PersistentVolumeClaim, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-apiserver",
}
argsMap := map[string]string{
"--bind-address": "0.0.0.0",
"--etcd-servers": "http://localhost:2379",
"--secure-port": fmt.Sprintf("%d", apiServerSecurePort),
"--client-ca-file": "/etc/federation/apiserver/ca.crt",
"--tls-cert-file": "/etc/federation/apiserver/server.crt",
"--tls-private-key-file": "/etc/federation/apiserver/server.key",
"--admission-control": "NamespaceLifecycle",
}
if advertiseAddress != "" {
argsMap["--advertise-address"] = advertiseAddress
}
if hasHTTPBasicAuthFile {
argsMap["--basic-auth-file"] = "/etc/federation/apiserver/basicauth.csv"
}
if hasTokenAuthFile {
argsMap["--token-auth-file"] = "/etc/federation/apiserver/token.csv"
}
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: apiserverPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "apiserver",
Image: image,
Command: command,
Ports: []api.ContainerPort{
{
Name: apiServerSecurePortName,
ContainerPort: apiServerSecurePort,
},
{
Name: "local",
ContainerPort: 8080,
},
},
VolumeMounts: []api.VolumeMount{
{
Name: credentialsName,
MountPath: "/etc/federation/apiserver",
ReadOnly: true,
},
},
},
{
Name: "etcd",
Image: "gcr.io/google_containers/etcd:3.0.17",
Command: []string{
"/usr/local/bin/etcd",
"--data-dir",
"/var/etcd/data",
},
},
},
Volumes: []api.Volume{
{
Name: credentialsName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: credentialsName,
},
},
},
},
},
},
},
}
if pvc != nil {
dataVolumeName := "etcddata"
etcdVolume := api.Volume{
Name: dataVolumeName,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}
etcdVolumeMount := api.VolumeMount{
Name: dataVolumeName,
MountPath: "/var/etcd",
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, etcdVolume)
for i, container := range dep.Spec.Template.Spec.Containers {
if container.Name == "etcd" {
dep.Spec.Template.Spec.Containers[i].VolumeMounts = append(dep.Spec.Template.Spec.Containers[i].VolumeMounts, etcdVolumeMount)
}
}
}
if dryRun {
return dep, nil
}
createdDep, err := clientset.Extensions().Deployments(namespace).Create(dep)
return createdDep, err
}
func createControllerManagerSA(clientset client.Interface, namespace, federationName string, dryRun bool) (*api.ServiceAccount, error) {
sa := &api.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: ControllerManagerSA,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return sa, nil
}
return clientset.Core().ServiceAccounts(namespace).Create(sa)
}
func createRoleBindings(clientset client.Interface, namespace, saName, federationName string, dryRun bool) (*rbac.Role, *rbac.RoleBinding, error) {
roleName := "federation-system:federation-controller-manager"
role := &rbac.Role{
		// A role used to bootstrap the federation-controller-manager so that it
		// can read secrets in the host cluster in order to access the other clusters.
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch").Groups(legacyAPIGroup).Resources("secrets").RuleOrDie(),
},
}
rolebinding, err := rbac.NewRoleBinding(roleName, namespace).SAs(namespace, saName).Binding()
if err != nil {
return nil, nil, err
}
rolebinding.Labels = componentLabel
rolebinding.Annotations = map[string]string{federation.FederationNameAnnotation: federationName}
if dryRun {
return role, &rolebinding, nil
}
newRole, err := clientset.Rbac().Roles(namespace).Create(role)
if err != nil {
return nil, nil, err
}
newRolebinding, err := clientset.Rbac().RoleBindings(namespace).Create(&rolebinding)
return newRole, newRolebinding, err
}
func createControllerManager(clientset client.Interface, namespace, name, svcName, cmName, image, kubeconfigName, dnsZoneName, dnsProvider, dnsProviderConfig, saName string, dnsProviderSecret *api.Secret, argOverrides map[string]string, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-controller-manager",
}
argsMap := map[string]string{
"--kubeconfig": "/etc/federation/controller-manager/kubeconfig",
}
argsMap["--master"] = fmt.Sprintf("https://%s", svcName)
argsMap["--dns-provider"] = dnsProvider
argsMap["--federation-name"] = name
argsMap["--zone-name"] = dnsZoneName
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Namespace: namespace,
Labels: componentLabel,
// We additionally update the details (in annotations) about the
// kube-dns config map which needs to be created in the clusters
// registering to this federation (at kubefed join).
			// We won't otherwise have this information available at kubefed join.
Annotations: map[string]string{
// TODO: the name/domain name pair should ideally be checked for naming convention
// as done in kube-dns federation flags check.
// https://github.com/kubernetes/dns/blob/master/pkg/dns/federation/federation.go
// TODO v2: Until kube-dns can handle trailing periods we strip them all.
// See https://github.com/kubernetes/dns/issues/67
util.FedDomainMapKey: fmt.Sprintf("%s=%s", name, strings.TrimRight(dnsZoneName, ".")),
federation.FederationNameAnnotation: name,
},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Labels: controllerManagerPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "controller-manager",
Image: image,
Command: command,
VolumeMounts: []api.VolumeMount{
{
Name: kubeconfigName,
MountPath: "/etc/federation/controller-manager",
ReadOnly: true,
},
},
Env: []api.EnvVar{
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
},
},
Volumes: []api.Volume{
{
Name: kubeconfigName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: kubeconfigName,
},
},
},
},
},
},
},
}
if saName != "" {
dep.Spec.Template.Spec.ServiceAccountName = saName
}
if dnsProviderSecret != nil {
dep = addDNSProviderConfig(dep, dnsProviderSecret.Name)
if dnsProvider == util.FedDNSProviderCoreDNS {
var err error
dep, err = addCoreDNSServerAnnotation(dep, dnsZoneName, dnsProviderConfig)
if err != nil {
return nil, err
}
}
}
if dryRun {
return dep, nil
}
return clientset.Extensions().Deployments(namespace).Create(dep)
}
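// marshallOverrides parses a comma-separated list of "--key=value" override
// arguments into a map, trimming surrounding whitespace. Illustrative
// (editorial) example:
//
//	marshallOverrides("--v=4, --secure-port=8443")
//	// => map[string]string{"--v": "4", "--secure-port": "8443"}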
func marshallOverrides(overrideArgString string) (map[string]string, error) {
if overrideArgString == "" {
return nil, nil
}
argsMap := make(map[string]string)
overrideArgs := strings.Split(overrideArgString, ",")
for _, overrideArg := range overrideArgs {
splitArg := strings.SplitN(overrideArg, "=", 2)
if len(splitArg) != 2 {
return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg)
}
key := strings.TrimSpace(splitArg[0])
val := strings.TrimSpace(splitArg[1])
if len(key) == 0 {
return nil, fmt.Errorf("wrong format for override arg: %s, arg name cannot be empty", overrideArg)
}
argsMap[key] = val
}
return argsMap, nil
}
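// argMapsToArgStrings merges overrides into argsMap (an override wins on
// conflict) and renders the result as sorted "key=value" strings.
// Illustrative (editorial) example:
//
//	argMapsToArgStrings(map[string]string{"--v": "2"}, map[string]string{"--v": "4"})
//	// => []string{"--v=4"}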
func argMapsToArgStrings(argsMap, overrides map[string]string) []string {
for key, val := range overrides {
argsMap[key] = val
}
args := []string{}
for key, value := range argsMap {
args = append(args, fmt.Sprintf("%s=%s", key, value))
}
// This is needed for the unit test deep copy to get an exact match
sort.Strings(args)
return args
}
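// waitForPods polls the host cluster until, roughly, a Running pod has been
// observed for each expected name prefix in fedPods (editorial doc comment;
// behavior inferred from the body below).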
func waitForPods(cmdOut io.Writer, clientset client.Interface, fedPods []string, namespace string) error {
err := wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
podCheck := len(fedPods)
podList, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return false, nil
}
for _, pod := range podList.Items {
for _, fedPod := range fedPods {
if strings.HasPrefix(pod.Name, fedPod) && pod.Status.Phase == "Running" {
podCheck -= 1
}
}
			// Ensure that all pods are in the Running state, or keep waiting.
if podCheck == 0 {
return true, nil
}
}
return false, nil
})
return err
}
func waitSrvHealthy(cmdOut io.Writer, config util.AdminConfig, context, kubeconfig string) error {
fedClientSet, err := config.FederationClientset(context, kubeconfig)
if err != nil {
return err
}
fedDiscoveryClient := fedClientSet.Discovery()
err = wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
body, err := fedDiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err != nil {
return false, nil
}
if strings.EqualFold(string(body), "ok") {
return true, nil
}
return false, nil
})
return err
}
func printSuccess(cmdOut io.Writer, ips, hostnames []string, svc *api.Service) error {
svcEndpoints := append(ips, hostnames...)
endpoints := strings.Join(svcEndpoints, ", ")
if svc.Spec.Type == api.ServiceTypeNodePort {
endpoints = ips[0] + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
if len(ips) > 1 {
endpoints = endpoints + ", ..."
}
}
_, err := fmt.Fprintf(cmdOut, "Federation API server is running at: %s\n", endpoints)
return err
}
func updateKubeconfig(config util.AdminConfig, name, endpoint, kubeConfigPath string, credentials *credentials, dryRun bool) error {
po := config.PathOptions()
po.LoadingRules.ExplicitPath = kubeConfigPath
kubeconfig, err := po.GetStartingConfig()
if err != nil {
return err
}
// Populate API server endpoint info.
cluster := clientcmdapi.NewCluster()
// Prefix "https" as the URL scheme to endpoint.
if !strings.HasPrefix(endpoint, "https://") {
endpoint = fmt.Sprintf("https://%s", endpoint)
}
cluster.Server = endpoint
cluster.CertificateAuthorityData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert)
// Populate credentials.
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.admin.Cert)
authInfo.ClientKeyData = certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.admin.Key)
authInfo.Token = credentials.token
var httpBasicAuthInfo *clientcmdapi.AuthInfo
if credentials.password != "" {
httpBasicAuthInfo = clientcmdapi.NewAuthInfo()
httpBasicAuthInfo.Password = credentials.password
httpBasicAuthInfo.Username = credentials.username
}
// Populate context.
context := clientcmdapi.NewContext()
context.Cluster = name
context.AuthInfo = name
// Update the config struct with API server endpoint info,
// credentials and context.
kubeconfig.Clusters[name] = cluster
kubeconfig.AuthInfos[name] = authInfo
if httpBasicAuthInfo != nil {
kubeconfig.AuthInfos[fmt.Sprintf("%s-basic-auth", name)] = httpBasicAuthInfo
}
kubeconfig.Contexts[name] = context
if !dryRun {
// Write the update kubeconfig.
if err := clientcmd.ModifyConfig(po, *kubeconfig, true); err != nil {
return err
}
}
return nil
}
func createDNSProviderConfigSecret(clientset client.Interface, namespace, name, federationName string, dnsProviderConfigBytes []byte, dryRun bool) (*api.Secret, error) {
if dnsProviderConfigBytes == nil {
return nil, nil
}
secretSpec := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: map[string][]byte{
name: dnsProviderConfigBytes,
},
}
var secret *api.Secret
var err error
if !dryRun {
secret, err = clientset.Core().Secrets(namespace).Create(secretSpec)
if err != nil {
return nil, err
}
}
return secret, nil
}
func addDNSProviderConfig(dep *extensions.Deployment, secretName string) *extensions.Deployment {
const (
dnsProviderConfigVolume = "config-volume"
dnsProviderConfigMountPath = "/etc/federation/dns-provider"
)
// Create a volume from dns-provider secret
volume := api.Volume{
Name: dnsProviderConfigVolume,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: secretName,
},
},
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, volume)
// Mount dns-provider secret volume to controller-manager container
volumeMount := api.VolumeMount{
Name: dnsProviderConfigVolume,
MountPath: dnsProviderConfigMountPath,
ReadOnly: true,
}
dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(dep.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
dep.Spec.Template.Spec.Containers[0].Command = append(dep.Spec.Template.Spec.Containers[0].Command, fmt.Sprintf("--dns-provider-config=%s/%s", dnsProviderConfigMountPath, secretName))
return dep
}
// authFileContents returns a CSV string containing the contents of an
// authentication file in the format required by the federation-apiserver.
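// An emitted line has the shape "<secret>,<username>,<generated-uuid>\n"
// (illustrative), i.e. secret first, then user name, then a generated user ID.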
func authFileContents(username, authSecret string) []byte {
return []byte(fmt.Sprintf("%s,%s,%s\n", authSecret, username, uuid.NewUUID()))
}
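// addCoreDNSServerAnnotation loads the CoreDNS provider config (gcfg format;
// hypothetically a file with a [Global] section listing the CoreDNS
// endpoints) and records the DNS zone, nameserver endpoints and provider on
// the deployment's annotations for later use at kubefed join.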
func addCoreDNSServerAnnotation(deployment *extensions.Deployment, dnsZoneName, dnsProviderConfig string) (*extensions.Deployment, error) {
var cfg coredns.Config
if err := gcfg.ReadFileInto(&cfg, dnsProviderConfig); err != nil {
return nil, err
}
deployment.Annotations[util.FedDNSZoneName] = dnsZoneName
deployment.Annotations[util.FedNameServer] = cfg.Global.CoreDNSEndpoints
deployment.Annotations[util.FedDNSProvider] = util.FedDNSProviderCoreDNS
return deployment, nil
}
| federation/pkg/kubefed/init/init.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.24242419004440308,
0.0026813887525349855,
0.00016008905367925763,
0.0001696914987405762,
0.02234768308699131
] |
{
"id": 6,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/rbac\"\n",
"\tclient \"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset\"\n",
"\t\"k8s.io/kubernetes/pkg/kubectl/cmd/templates\"\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/version\"\n",
"\n",
"\t\"github.com/golang/glog\"\n",
"\t\"github.com/spf13/cobra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 53
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["circbuf.go"],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| vendor/github.com/armon/circbuf/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017289273091591895,
0.0001718508283374831,
0.00017054186901077628,
0.0001721178414300084,
9.781327889868408e-7
] |
{
"id": 6,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/rbac\"\n",
"\tclient \"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset\"\n",
"\t\"k8s.io/kubernetes/pkg/kubectl/cmd/templates\"\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/version\"\n",
"\n",
"\t\"github.com/golang/glog\"\n",
"\t\"github.com/spf13/cobra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 53
} | GCE_HOSTS=
GCE_IMAGE_CONFIG_PATH=test/e2e_node/jenkins/image-config.yaml
GCE_ZONE=us-central1-f
GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
| test/e2e_node/jenkins/conformance/jenkins-conformance.properties | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00016611668979749084,
0.00016611668979749084,
0.00016611668979749084,
0.00016611668979749084,
0
] |
{
"id": 6,
"code_window": [
"\t\"k8s.io/kubernetes/pkg/apis/rbac\"\n",
"\tclient \"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset\"\n",
"\t\"k8s.io/kubernetes/pkg/kubectl/cmd/templates\"\n",
"\tcmdutil \"k8s.io/kubernetes/pkg/kubectl/cmd/util\"\n",
"\t\"k8s.io/kubernetes/pkg/version\"\n",
"\n",
"\t\"github.com/golang/glog\"\n",
"\t\"github.com/spf13/cobra\"\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 53
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package nfs contains the internal representation of Ceph file system
// (CephFS) volumes.
package cephfs // import "k8s.io/kubernetes/pkg/volume/cephfs"
| pkg/volume/cephfs/doc.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017333758296445012,
0.0001690140343271196,
0.00016469048568978906,
0.0001690140343271196,
0.000004323548637330532
] |
{
"id": 7,
"code_window": [
"\tcontrollerManagerPodLabels = map[string]string{\n",
"\t\t\"app\": \"federated-cluster\",\n",
"\t\t\"module\": \"federation-controller-manager\",\n",
"\t}\n",
"\n",
"\thyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
")\n",
"\n",
"type initFederation struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 128
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"os"
"k8s.io/kubernetes/federation/pkg/kubefed"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/logs"
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
func Run() error {
logs.InitLogs()
defer logs.FlushLogs()
cmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
return cmd.Execute()
}
| federation/cmd/kubefed/app/kubefed.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00020078543457202613,
0.00018274140893481672,
0.00017355804448015988,
0.0001783111074473709,
0.000010597099389997311
] |
{
"id": 7,
"code_window": [
"\tcontrollerManagerPodLabels = map[string]string{\n",
"\t\t\"app\": \"federated-cluster\",\n",
"\t\t\"module\": \"federation-controller-manager\",\n",
"\t}\n",
"\n",
"\thyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
")\n",
"\n",
"type initFederation struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 128
} | // +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by conversion-gen. Do not edit it manually!
package v1alpha1
import (
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
apiserver "k8s.io/apiserver/pkg/apis/apiserver"
)
func init() {
SchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration,
Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration,
Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration,
Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration,
)
}
func autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
*out = make([]apiserver.AdmissionPluginConfiguration, len(*in))
for i := range *in {
if err := Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Plugins = nil
}
return nil
}
// Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in *AdmissionConfiguration, out *apiserver.AdmissionConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_AdmissionConfiguration_To_apiserver_AdmissionConfiguration(in, out, s)
}
func autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
if in.Plugins != nil {
in, out := &in.Plugins, &out.Plugins
*out = make([]AdmissionPluginConfiguration, len(*in))
for i := range *in {
if err := Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Plugins = make([]AdmissionPluginConfiguration, 0)
}
return nil
}
// Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration is an autogenerated conversion function.
func Convert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in *apiserver.AdmissionConfiguration, out *AdmissionConfiguration, s conversion.Scope) error {
return autoConvert_apiserver_AdmissionConfiguration_To_v1alpha1_AdmissionConfiguration(in, out, s)
}
func autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
out.Name = in.Name
out.Path = in.Path
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Configuration, &out.Configuration, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration is an autogenerated conversion function.
func Convert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in *AdmissionPluginConfiguration, out *apiserver.AdmissionPluginConfiguration, s conversion.Scope) error {
return autoConvert_v1alpha1_AdmissionPluginConfiguration_To_apiserver_AdmissionPluginConfiguration(in, out, s)
}
func autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
out.Name = in.Name
out.Path = in.Path
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Configuration, &out.Configuration, s); err != nil {
return err
}
return nil
}
// Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration is an autogenerated conversion function.
func Convert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in *apiserver.AdmissionPluginConfiguration, out *AdmissionPluginConfiguration, s conversion.Scope) error {
return autoConvert_apiserver_AdmissionPluginConfiguration_To_v1alpha1_AdmissionPluginConfiguration(in, out, s)
}
| staging/src/k8s.io/apiserver/pkg/apis/apiserver/v1alpha1/zz_generated.conversion.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00024389328609686345,
0.0001840736367739737,
0.00016449243412353098,
0.00017814313468988985,
0.000020406407202244736
] |
{
"id": 7,
"code_window": [
"\tcontrollerManagerPodLabels = map[string]string{\n",
"\t\t\"app\": \"federated-cluster\",\n",
"\t\t\"module\": \"federation-controller-manager\",\n",
"\t}\n",
"\n",
"\thyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
")\n",
"\n",
"type initFederation struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 128
} | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "storage.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, RegisterDefaults)
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&StorageClass{},
&StorageClassList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
| pkg/apis/storage/v1beta1/register.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.0008401580853387713,
0.0002679487515706569,
0.00016812756075523794,
0.0001737285783747211,
0.0002336351026315242
] |
{
"id": 7,
"code_window": [
"\tcontrollerManagerPodLabels = map[string]string{\n",
"\t\t\"app\": \"federated-cluster\",\n",
"\t\t\"module\": \"federation-controller-manager\",\n",
"\t}\n",
"\n",
"\thyperkubeImageName = \"gcr.io/google_containers/hyperkube-amd64\"\n",
")\n",
"\n",
"type initFederation struct {\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"replace",
"replace",
"keep",
"keep",
"keep"
],
"after_edit": [],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 128
} | kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: a-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10G
| hack/testdata/prune-reap/a.yml | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017399713397026062,
0.000173863023519516,
0.00017372891306877136,
0.000173863023519516,
1.341104507446289e-7
] |
{
"id": 8,
"code_window": [
"\tapiServerEnableTokenAuth bool\n",
"}\n",
"\n",
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {\n",
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\n",
"\tflags.StringVar(&o.dnsZoneName, \"dns-zone-name\", \"\", \"DNS suffix for this federation. Federated Service DNS names are published with this suffix.\")\n",
"\tflags.StringVar(&o.image, \"image\", defaultImage, \"Image to use for federation API server and controller manager binaries.\")\n",
"\tflags.StringVar(&o.dnsProvider, \"dns-provider\", \"\", \"Dns provider to be used for this deployment.\")\n",
"\tflags.StringVar(&o.dnsProviderConfig, \"dns-provider-config\", \"\", \"Config file path on local file system for configuring DNS provider.\")\n",
"\tflags.StringVar(&o.etcdPVCapacity, \"etcd-pv-capacity\", \"10Gi\", \"Size of persistent volume claim to be used for etcd.\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet, defaultImage string) {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 156
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(madhusdancs):
// 1. Make printSuccess prepend protocol/scheme to the IPs/hostnames.
// 2. Separate etcd container from API server pod as a first step towards enabling HA.
// 3. Make API server and controller manager replicas customizable via the HA work.
package init
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"sort"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
triple "k8s.io/client-go/util/cert/triple"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/federation/apis/federation"
"k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
"k8s.io/kubernetes/federation/pkg/kubefed/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac"
client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"gopkg.in/gcfg.v1"
)
const (
APIServerCN = "federation-apiserver"
ControllerManagerCN = "federation-controller-manager"
AdminCN = "admin"
HostClusterLocalDNSZoneName = "cluster.local."
APIServerNameSuffix = "apiserver"
CMNameSuffix = "controller-manager"
CredentialSuffix = "credentials"
KubeconfigNameSuffix = "kubeconfig"
// User name used by federation controller manager to make
// calls to federation API server.
ControllerManagerUser = "federation-controller-manager"
// Name of the ServiceAccount used by the federation controller manager
// to access the secrets in the host cluster.
ControllerManagerSA = "federation-controller-manager"
// Group name of the legacy/core API group
legacyAPIGroup = ""
lbAddrRetryInterval = 5 * time.Second
podWaitInterval = 2 * time.Second
apiserverServiceTypeFlag = "api-server-service-type"
apiserverAdvertiseAddressFlag = "api-server-advertise-address"
dnsProviderSecretName = "federation-dns-provider.conf"
apiServerSecurePortName = "https"
	// Set the secure port to 8443 to avoid requiring root privileges
	// to bind to ports below 1024. The apiserver's service will still
	// be exposed on port 443.
apiServerSecurePort = 8443
)
var (
init_long = templates.LongDesc(`
Initialize a federation control plane.
Federation control plane is hosted inside a Kubernetes
cluster. The host cluster must be specified using the
--host-cluster-context flag.`)
init_example = templates.Examples(`
# Initialize federation control plane for a federation
# named foo in the host cluster whose local kubeconfig
# context is bar.
kubefed init foo --host-cluster-context=bar`)
componentLabel = map[string]string{
"app": "federated-cluster",
}
apiserverSvcSelector = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
apiserverPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-apiserver",
}
controllerManagerPodLabels = map[string]string{
"app": "federated-cluster",
"module": "federation-controller-manager",
}
hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
)
type initFederation struct {
commonOptions util.SubcommandOptions
options initFederationOptions
}
type initFederationOptions struct {
dnsZoneName string
image string
dnsProvider string
dnsProviderConfig string
etcdPVCapacity string
etcdPersistentStorage bool
dryRun bool
apiServerOverridesString string
apiServerOverrides map[string]string
controllerManagerOverridesString string
controllerManagerOverrides map[string]string
apiServerServiceTypeString string
apiServerServiceType v1.ServiceType
apiServerAdvertiseAddress string
apiServerEnableHTTPBasicAuth bool
apiServerEnableTokenAuth bool
}
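// Bind registers the init subcommand's flags on the given flag set.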
func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {
defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())
flags.StringVar(&o.dnsZoneName, "dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
flags.StringVar(&o.image, "image", defaultImage, "Image to use for federation API server and controller manager binaries.")
flags.StringVar(&o.dnsProvider, "dns-provider", "", "Dns provider to be used for this deployment.")
flags.StringVar(&o.dnsProviderConfig, "dns-provider-config", "", "Config file path on local file system for configuring DNS provider.")
flags.StringVar(&o.etcdPVCapacity, "etcd-pv-capacity", "10Gi", "Size of persistent volume claim to be used for etcd.")
flags.BoolVar(&o.etcdPersistentStorage, "etcd-persistent-storage", true, "Use persistent volume for etcd. Defaults to 'true'.")
flags.BoolVar(&o.dryRun, "dry-run", false, "dry run without sending commands to server.")
flags.StringVar(&o.apiServerOverridesString, "apiserver-arg-overrides", "", "comma separated list of federation-apiserver arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.controllerManagerOverridesString, "controllermanager-arg-overrides", "", "comma separated list of federation-controller-manager arguments to override: Example \"--arg1=value1,--arg2=value2...\"")
flags.StringVar(&o.apiServerServiceTypeString, apiserverServiceTypeFlag, string(v1.ServiceTypeLoadBalancer), "The type of service to create for federation API server. Options: 'LoadBalancer' (default), 'NodePort'.")
flags.StringVar(&o.apiServerAdvertiseAddress, apiserverAdvertiseAddressFlag, "", "Preferred address to advertise api server nodeport service. Valid only if '"+apiserverServiceTypeFlag+"=NodePort'.")
flags.BoolVar(&o.apiServerEnableHTTPBasicAuth, "apiserver-enable-basic-auth", false, "Enables HTTP Basic authentication for the federation-apiserver. Defaults to false.")
flags.BoolVar(&o.apiServerEnableTokenAuth, "apiserver-enable-token-auth", false, "Enables token authentication for the federation-apiserver. Defaults to false.")
}
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
opts := &initFederation{}
cmd := &cobra.Command{
Use: "init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT",
Short: "init initializes a federation control plane",
Long: init_long,
Example: init_example,
Run: func(cmd *cobra.Command, args []string) {
cmdutil.CheckErr(opts.Complete(cmd, args))
cmdutil.CheckErr(opts.Run(cmdOut, config))
},
}
flags := cmd.Flags()
opts.commonOptions.Bind(flags)
opts.options.Bind(flags)
return cmd
}
type entityKeyPairs struct {
ca *triple.KeyPair
server *triple.KeyPair
controllerManager *triple.KeyPair
admin *triple.KeyPair
}
type credentials struct {
username string
password string
token string
certEntKeyPairs *entityKeyPairs
}
// Complete ensures that options are valid and marshals them if necessary.
func (i *initFederation) Complete(cmd *cobra.Command, args []string) error {
if len(i.options.dnsProvider) == 0 {
return fmt.Errorf("--dns-provider is mandatory")
}
err := i.commonOptions.SetName(cmd, args)
if err != nil {
return err
}
i.options.apiServerServiceType = v1.ServiceType(i.options.apiServerServiceTypeString)
if i.options.apiServerServiceType != v1.ServiceTypeLoadBalancer && i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("invalid %s: %s, should be either %s or %s", apiserverServiceTypeFlag, i.options.apiServerServiceType, v1.ServiceTypeLoadBalancer, v1.ServiceTypeNodePort)
}
if i.options.apiServerAdvertiseAddress != "" {
ip := net.ParseIP(i.options.apiServerAdvertiseAddress)
if ip == nil {
return fmt.Errorf("invalid %s: %s, should be a valid ip address", apiserverAdvertiseAddressFlag, i.options.apiServerAdvertiseAddress)
}
if i.options.apiServerServiceType != v1.ServiceTypeNodePort {
return fmt.Errorf("%s should be passed only with '%s=NodePort'", apiserverAdvertiseAddressFlag, apiserverServiceTypeFlag)
}
}
i.options.apiServerOverrides, err = marshallOverrides(i.options.apiServerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --apiserver-arg-overrides: %v", err)
}
i.options.controllerManagerOverrides, err = marshallOverrides(i.options.controllerManagerOverridesString)
if err != nil {
return fmt.Errorf("error marshalling --controllermanager-arg-overrides: %v", err)
}
if i.options.dnsProviderConfig != "" {
if _, err := os.Stat(i.options.dnsProviderConfig); err != nil {
return fmt.Errorf("error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
return nil
}
// Run initializes a federation control plane.
// See the design doc in https://github.com/kubernetes/kubernetes/pull/34484
// for details.
func (i *initFederation) Run(cmdOut io.Writer, config util.AdminConfig) error {
hostFactory := config.ClusterFactory(i.commonOptions.Host, i.commonOptions.Kubeconfig)
hostClientset, err := hostFactory.ClientSet()
if err != nil {
return err
}
rbacAvailable := true
rbacVersionedClientset, err := util.GetVersionedClientForRBACOrFail(hostFactory)
if err != nil {
if _, ok := err.(*util.NoRBACAPIError); !ok {
return err
}
		// If the error is of type NoRBACAPIError, we continue to create the rest
		// of the resources, without the SA and roles (in the absence of RBAC support).
rbacAvailable = false
}
serverName := fmt.Sprintf("%s-%s", i.commonOptions.Name, APIServerNameSuffix)
serverCredName := fmt.Sprintf("%s-%s", serverName, CredentialSuffix)
cmName := fmt.Sprintf("%s-%s", i.commonOptions.Name, CMNameSuffix)
cmKubeconfigName := fmt.Sprintf("%s-%s", cmName, KubeconfigNameSuffix)
var dnsProviderConfigBytes []byte
if i.options.dnsProviderConfig != "" {
dnsProviderConfigBytes, err = ioutil.ReadFile(i.options.dnsProviderConfig)
if err != nil {
return fmt.Errorf("Error reading file provided to --dns-provider-config flag, err: %v", err)
}
}
fmt.Fprintf(cmdOut, "Creating a namespace %s for federation system components...", i.commonOptions.FederationSystemNamespace)
glog.V(4).Infof("Creating a namespace %s for federation system components", i.commonOptions.FederationSystemNamespace)
_, err = createNamespace(hostClientset, i.commonOptions.Name, i.commonOptions.FederationSystemNamespace, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Creating federation control plane service...")
glog.V(4).Info("Creating federation control plane service")
svc, ips, hostnames, err := createService(cmdOut, hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.apiServerAdvertiseAddress, i.options.apiServerServiceType, i.options.dryRun)
if err != nil {
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Infof("Created service named %s with IP addresses %v, hostnames %v", svc.Name, ips, hostnames)
fmt.Fprint(cmdOut, "Creating federation control plane objects (credentials, persistent volume claim)...")
glog.V(4).Info("Generating TLS certificates and credentials for communicating with the federation API server")
credentials, err := generateCredentials(i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, HostClusterLocalDNSZoneName, serverCredName, ips, hostnames, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.dryRun)
if err != nil {
return err
}
// Create the secret containing the credentials.
_, err = createAPIServerCredentialsSecret(hostClientset, i.commonOptions.FederationSystemNamespace, serverCredName, i.commonOptions.Name, credentials, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Certificates and credentials generated")
glog.V(4).Info("Creating an entry in the kubeconfig file with the certificate and credential data")
_, err = createControllerManagerKubeconfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmKubeconfigName, credentials.certEntKeyPairs, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Credentials secret successfully created")
glog.V(4).Info("Creating a persistent volume and a claim to store the federation API server's state, including etcd data")
var pvc *api.PersistentVolumeClaim
if i.options.etcdPersistentStorage {
pvc, err = createPVC(hostClientset, i.commonOptions.FederationSystemNamespace, svc.Name, i.commonOptions.Name, i.options.etcdPVCapacity, i.options.dryRun)
if err != nil {
return err
}
}
glog.V(4).Info("Persistent volume and claim created")
fmt.Fprintln(cmdOut, " done")
	// Since only one IP address can be specified as the advertise address,
	// we arbitrarily pick the first available IP address, preferring the
	// user-provided apiserverAdvertiseAddress over other available addresses.
advertiseAddress := i.options.apiServerAdvertiseAddress
if advertiseAddress == "" && len(ips) > 0 {
advertiseAddress = ips[0]
}
fmt.Fprint(cmdOut, "Creating federation component deployments...")
glog.V(4).Info("Creating federation control plane components")
_, err = createAPIServer(hostClientset, i.commonOptions.FederationSystemNamespace, serverName, i.commonOptions.Name, i.options.image, advertiseAddress, serverCredName, i.options.apiServerEnableHTTPBasicAuth, i.options.apiServerEnableTokenAuth, i.options.apiServerOverrides, pvc, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation API server")
sa := &api.ServiceAccount{}
sa.Name = ""
// Create a service account and related RBAC roles if the host cluster has RBAC support.
// TODO: We must evaluate creating a separate service account even when RBAC support is missing
if rbacAvailable {
glog.V(4).Info("Creating service account for federation controller manager in the host cluster")
sa, err = createControllerManagerSA(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager service account")
glog.V(4).Info("Creating RBAC role and role bindings for the federation controller manager's service account")
_, _, err = createRoleBindings(rbacVersionedClientset, i.commonOptions.FederationSystemNamespace, sa.Name, i.commonOptions.Name, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created RBAC role and role bindings")
}
glog.V(4).Info("Creating a DNS provider config secret")
dnsProviderSecret, err := createDNSProviderConfigSecret(hostClientset, i.commonOptions.FederationSystemNamespace, dnsProviderSecretName, i.commonOptions.Name, dnsProviderConfigBytes, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created DNS provider config secret")
glog.V(4).Info("Creating federation controller manager deployment")
_, err = createControllerManager(hostClientset, i.commonOptions.FederationSystemNamespace, i.commonOptions.Name, svc.Name, cmName, i.options.image, cmKubeconfigName, i.options.dnsZoneName, i.options.dnsProvider, i.options.dnsProviderConfig, sa.Name, dnsProviderSecret, i.options.controllerManagerOverrides, i.options.dryRun)
if err != nil {
return err
}
glog.V(4).Info("Successfully created federation controller manager deployment")
	fmt.Fprintln(cmdOut, " done")
fmt.Fprint(cmdOut, "Updating kubeconfig...")
glog.V(4).Info("Updating kubeconfig")
	// Pick the first IP/hostname to update the API server endpoint in kubeconfig and also to report back to the user.
	// In the case of a NodePort service for the API server, the IPs are the nodes' external IPs.
endpoint := ""
if len(ips) > 0 {
endpoint = ips[0]
} else if len(hostnames) > 0 {
endpoint = hostnames[0]
}
	// If the service is of type NodePort, the port must be appended to the endpoint since it is a non-standard port.
if i.options.apiServerServiceType == v1.ServiceTypeNodePort {
endpoint = endpoint + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
}
err = updateKubeconfig(config, i.commonOptions.Name, endpoint, i.commonOptions.Kubeconfig, credentials, i.options.dryRun)
if err != nil {
glog.V(4).Infof("Failed to update kubeconfig: %v", err)
return err
}
fmt.Fprintln(cmdOut, " done")
glog.V(4).Info("Successfully updated kubeconfig")
if !i.options.dryRun {
fmt.Fprint(cmdOut, "Waiting for federation control plane to come up...")
glog.V(4).Info("Waiting for federation control plane to come up")
fedPods := []string{serverName, cmName}
err = waitForPods(cmdOut, hostClientset, fedPods, i.commonOptions.FederationSystemNamespace)
if err != nil {
return err
}
err = waitSrvHealthy(cmdOut, config, i.commonOptions.Name, i.commonOptions.Kubeconfig)
if err != nil {
return err
}
glog.V(4).Info("Federation control plane running")
fmt.Fprintln(cmdOut, " done")
return printSuccess(cmdOut, ips, hostnames, svc)
}
_, err = fmt.Fprintln(cmdOut, "Federation control plane runs (dry run)")
glog.V(4).Info("Federation control plane runs (dry run)")
return err
}
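// createNamespace creates the federation system namespace in the host
// cluster, annotated with the federation name. In dry-run mode the object
// is returned without being submitted to the API server.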
func createNamespace(clientset client.Interface, federationName, namespace string, dryRun bool) (*api.Namespace, error) {
ns := &api.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return ns, nil
}
return clientset.Core().Namespaces().Create(ns)
}
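// createService creates the federation API server service in the host
// cluster and returns the service object together with the IP addresses and
// hostnames at which the API server will be reachable.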
func createService(cmdOut io.Writer, clientset client.Interface, namespace, svcName, federationName, apiserverAdvertiseAddress string, apiserverServiceType v1.ServiceType, dryRun bool) (*api.Service, []string, []string, error) {
svc := &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: svcName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.ServiceSpec{
Type: api.ServiceType(apiserverServiceType),
Selector: apiserverSvcSelector,
Ports: []api.ServicePort{
{
Name: "https",
Protocol: "TCP",
Port: 443,
TargetPort: intstr.FromString(apiServerSecurePortName),
},
},
},
}
if dryRun {
return svc, nil, nil, nil
}
var err error
svc, err = clientset.Core().Services(namespace).Create(svc)
ips := []string{}
hostnames := []string{}
if apiserverServiceType == v1.ServiceTypeLoadBalancer {
ips, hostnames, err = waitForLoadBalancerAddress(cmdOut, clientset, svc, dryRun)
} else {
if apiserverAdvertiseAddress != "" {
ips = append(ips, apiserverAdvertiseAddress)
} else {
ips, err = getClusterNodeIPs(clientset)
}
}
if err != nil {
return svc, nil, nil, err
}
return svc, ips, hostnames, err
}
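// getClusterNodeIPs returns the external IP addresses of the host cluster's
// nodes, which serve as API server endpoints for NodePort services.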
func getClusterNodeIPs(clientset client.Interface) ([]string, error) {
preferredAddressTypes := []api.NodeAddressType{
api.NodeExternalIP,
}
nodeList, err := clientset.Core().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
nodeAddresses := []string{}
for _, node := range nodeList.Items {
OuterLoop:
for _, addressType := range preferredAddressTypes {
for _, address := range node.Status.Addresses {
if address.Type == addressType {
nodeAddresses = append(nodeAddresses, address.Address)
break OuterLoop
}
}
}
}
return nodeAddresses, nil
}
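// waitForLoadBalancerAddress polls the API server service until its load
// balancer reports at least one ingress IP address or hostname.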
func waitForLoadBalancerAddress(cmdOut io.Writer, clientset client.Interface, svc *api.Service, dryRun bool) ([]string, []string, error) {
ips := []string{}
hostnames := []string{}
if dryRun {
return ips, hostnames, nil
}
err := wait.PollImmediateInfinite(lbAddrRetryInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
pollSvc, err := clientset.Core().Services(svc.Namespace).Get(svc.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
if ings := pollSvc.Status.LoadBalancer.Ingress; len(ings) > 0 {
for _, ing := range ings {
if len(ing.IP) > 0 {
ips = append(ips, ing.IP)
}
if len(ing.Hostname) > 0 {
hostnames = append(hostnames, ing.Hostname)
}
}
if len(ips) > 0 || len(hostnames) > 0 {
return true, nil
}
}
return false, nil
})
if err != nil {
return nil, nil, err
}
return ips, hostnames, nil
}
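// generateCredentials creates the credentials (TLS certificates and, when
// enabled, a basic-auth password and a bearer token) used to secure access
// to the federation API server.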
func generateCredentials(svcNamespace, name, svcName, localDNSZoneName, serverCredName string, ips, hostnames []string, enableHTTPBasicAuth, enableTokenAuth, dryRun bool) (*credentials, error) {
credentials := credentials{
username: AdminCN,
}
if enableHTTPBasicAuth {
credentials.password = string(uuid.NewUUID())
}
if enableTokenAuth {
credentials.token = string(uuid.NewUUID())
}
entKeyPairs, err := genCerts(svcNamespace, name, svcName, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, err
}
credentials.certEntKeyPairs = entKeyPairs
return &credentials, nil
}
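// genCerts generates a CA along with server, controller manager, and admin
// key pairs signed by that CA.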
func genCerts(svcNamespace, name, svcName, localDNSZoneName string, ips, hostnames []string) (*entityKeyPairs, error) {
ca, err := triple.NewCA(name)
if err != nil {
return nil, fmt.Errorf("failed to create CA key and certificate: %v", err)
}
server, err := triple.NewServerKeyPair(ca, APIServerCN, svcName, svcNamespace, localDNSZoneName, ips, hostnames)
if err != nil {
return nil, fmt.Errorf("failed to create federation API server key and certificate: %v", err)
}
cm, err := triple.NewClientKeyPair(ca, ControllerManagerCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create federation controller manager client key and certificate: %v", err)
}
admin, err := triple.NewClientKeyPair(ca, AdminCN, nil)
if err != nil {
return nil, fmt.Errorf("failed to create client key and certificate for an admin: %v", err)
}
return &entityKeyPairs{
ca: ca,
server: server,
controllerManager: cm,
admin: admin,
}, nil
}
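// createAPIServerCredentialsSecret packages the generated certificates and
// auth files into a secret mounted by the federation API server.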
func createAPIServerCredentialsSecret(clientset client.Interface, namespace, credentialsName, federationName string, credentials *credentials, dryRun bool) (*api.Secret, error) {
// Build the secret object with API server credentials.
data := map[string][]byte{
"ca.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert),
"server.crt": certutil.EncodeCertPEM(credentials.certEntKeyPairs.server.Cert),
"server.key": certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.server.Key),
}
if credentials.password != "" {
data["basicauth.csv"] = authFileContents(credentials.username, credentials.password)
}
if credentials.token != "" {
data["token.csv"] = authFileContents(credentials.username, credentials.token)
}
secret := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: credentialsName,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: data,
}
if dryRun {
return secret, nil
}
// Boilerplate to create the secret in the host cluster.
return clientset.Core().Secrets(namespace).Create(secret)
}
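// createControllerManagerKubeconfigSecret builds a kubeconfig that the
// federation controller manager uses to reach the federation API server and
// stores it as a secret in the host cluster.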
func createControllerManagerKubeconfigSecret(clientset client.Interface, namespace, name, svcName, kubeconfigName string, entKeyPairs *entityKeyPairs, dryRun bool) (*api.Secret, error) {
config := kubeconfigutil.CreateWithCerts(
fmt.Sprintf("https://%s", svcName),
name,
ControllerManagerUser,
certutil.EncodeCertPEM(entKeyPairs.ca.Cert),
certutil.EncodePrivateKeyPEM(entKeyPairs.controllerManager.Key),
certutil.EncodeCertPEM(entKeyPairs.controllerManager.Cert),
)
return util.CreateKubeconfigSecret(clientset, config, namespace, kubeconfigName, name, "", dryRun)
}
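// createPVC creates the persistent volume claim that backs the federation
// API server's etcd data.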
func createPVC(clientset client.Interface, namespace, svcName, federationName, etcdPVCapacity string, dryRun bool) (*api.PersistentVolumeClaim, error) {
capacity, err := resource.ParseQuantity(etcdPVCapacity)
if err != nil {
return nil, err
}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-etcd-claim", svcName),
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "yes",
federation.FederationNameAnnotation: federationName},
},
Spec: api.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
},
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceStorage: capacity,
},
},
},
}
if dryRun {
return pvc, nil
}
return clientset.Core().PersistentVolumeClaims(namespace).Create(pvc)
}
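// createAPIServer creates the deployment running the federation API server
// together with its etcd sidecar, wiring in the credentials secret and the
// optional etcd persistent volume claim.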
func createAPIServer(clientset client.Interface, namespace, name, federationName, image, advertiseAddress, credentialsName string, hasHTTPBasicAuthFile, hasTokenAuthFile bool, argOverrides map[string]string, pvc *api.PersistentVolumeClaim, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-apiserver",
}
argsMap := map[string]string{
"--bind-address": "0.0.0.0",
"--etcd-servers": "http://localhost:2379",
"--secure-port": fmt.Sprintf("%d", apiServerSecurePort),
"--client-ca-file": "/etc/federation/apiserver/ca.crt",
"--tls-cert-file": "/etc/federation/apiserver/server.crt",
"--tls-private-key-file": "/etc/federation/apiserver/server.key",
"--admission-control": "NamespaceLifecycle",
}
if advertiseAddress != "" {
argsMap["--advertise-address"] = advertiseAddress
}
if hasHTTPBasicAuthFile {
argsMap["--basic-auth-file"] = "/etc/federation/apiserver/basicauth.csv"
}
if hasTokenAuthFile {
argsMap["--token-auth-file"] = "/etc/federation/apiserver/token.csv"
}
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Labels: apiserverPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "apiserver",
Image: image,
Command: command,
Ports: []api.ContainerPort{
{
Name: apiServerSecurePortName,
ContainerPort: apiServerSecurePort,
},
{
Name: "local",
ContainerPort: 8080,
},
},
VolumeMounts: []api.VolumeMount{
{
Name: credentialsName,
MountPath: "/etc/federation/apiserver",
ReadOnly: true,
},
},
},
{
Name: "etcd",
Image: "gcr.io/google_containers/etcd:3.0.17",
Command: []string{
"/usr/local/bin/etcd",
"--data-dir",
"/var/etcd/data",
},
},
},
Volumes: []api.Volume{
{
Name: credentialsName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: credentialsName,
},
},
},
},
},
},
},
}
if pvc != nil {
dataVolumeName := "etcddata"
etcdVolume := api.Volume{
Name: dataVolumeName,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}
etcdVolumeMount := api.VolumeMount{
Name: dataVolumeName,
MountPath: "/var/etcd",
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, etcdVolume)
for i, container := range dep.Spec.Template.Spec.Containers {
if container.Name == "etcd" {
dep.Spec.Template.Spec.Containers[i].VolumeMounts = append(dep.Spec.Template.Spec.Containers[i].VolumeMounts, etcdVolumeMount)
}
}
}
if dryRun {
return dep, nil
}
createdDep, err := clientset.Extensions().Deployments(namespace).Create(dep)
return createdDep, err
}
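// createControllerManagerSA creates the service account under which the
// federation controller manager runs in the host cluster.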
func createControllerManagerSA(clientset client.Interface, namespace, federationName string, dryRun bool) (*api.ServiceAccount, error) {
sa := &api.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: ControllerManagerSA,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
}
if dryRun {
return sa, nil
}
return clientset.Core().ServiceAccounts(namespace).Create(sa)
}
func createRoleBindings(clientset client.Interface, namespace, saName, federationName string, dryRun bool) (*rbac.Role, *rbac.RoleBinding, error) {
roleName := "federation-system:federation-controller-manager"
role := &rbac.Role{
		// A role to use for bootstrapping the federation-controller-manager so it
		// can read the secrets in the host cluster that grant access to other clusters.
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
Namespace: namespace,
Labels: componentLabel,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Rules: []rbac.PolicyRule{
rbac.NewRule("get", "list", "watch").Groups(legacyAPIGroup).Resources("secrets").RuleOrDie(),
},
}
rolebinding, err := rbac.NewRoleBinding(roleName, namespace).SAs(namespace, saName).Binding()
if err != nil {
return nil, nil, err
}
rolebinding.Labels = componentLabel
rolebinding.Annotations = map[string]string{federation.FederationNameAnnotation: federationName}
if dryRun {
return role, &rolebinding, nil
}
newRole, err := clientset.Rbac().Roles(namespace).Create(role)
if err != nil {
return nil, nil, err
}
newRolebinding, err := clientset.Rbac().RoleBindings(namespace).Create(&rolebinding)
return newRole, newRolebinding, err
}
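// createControllerManager creates the deployment running the federation
// controller manager, configured with the DNS provider settings and the
// kubeconfig secret created earlier.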
func createControllerManager(clientset client.Interface, namespace, name, svcName, cmName, image, kubeconfigName, dnsZoneName, dnsProvider, dnsProviderConfig, saName string, dnsProviderSecret *api.Secret, argOverrides map[string]string, dryRun bool) (*extensions.Deployment, error) {
command := []string{
"/hyperkube",
"federation-controller-manager",
}
argsMap := map[string]string{
"--kubeconfig": "/etc/federation/controller-manager/kubeconfig",
}
argsMap["--master"] = fmt.Sprintf("https://%s", svcName)
argsMap["--dns-provider"] = dnsProvider
argsMap["--federation-name"] = name
argsMap["--zone-name"] = dnsZoneName
args := argMapsToArgStrings(argsMap, argOverrides)
command = append(command, args...)
dep := &extensions.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Namespace: namespace,
Labels: componentLabel,
// We additionally update the details (in annotations) about the
// kube-dns config map which needs to be created in the clusters
// registering to this federation (at kubefed join).
			// We won't otherwise have this information available at kubefed join.
Annotations: map[string]string{
// TODO: the name/domain name pair should ideally be checked for naming convention
// as done in kube-dns federation flags check.
// https://github.com/kubernetes/dns/blob/master/pkg/dns/federation/federation.go
// TODO v2: Until kube-dns can handle trailing periods we strip them all.
// See https://github.com/kubernetes/dns/issues/67
util.FedDomainMapKey: fmt.Sprintf("%s=%s", name, strings.TrimRight(dnsZoneName, ".")),
federation.FederationNameAnnotation: name,
},
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Template: api.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Name: cmName,
Labels: controllerManagerPodLabels,
Annotations: map[string]string{federation.FederationNameAnnotation: name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "controller-manager",
Image: image,
Command: command,
VolumeMounts: []api.VolumeMount{
{
Name: kubeconfigName,
MountPath: "/etc/federation/controller-manager",
ReadOnly: true,
},
},
Env: []api.EnvVar{
{
Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{
FieldPath: "metadata.namespace",
},
},
},
},
},
},
Volumes: []api.Volume{
{
Name: kubeconfigName,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: kubeconfigName,
},
},
},
},
},
},
},
}
if saName != "" {
dep.Spec.Template.Spec.ServiceAccountName = saName
}
if dnsProviderSecret != nil {
dep = addDNSProviderConfig(dep, dnsProviderSecret.Name)
if dnsProvider == util.FedDNSProviderCoreDNS {
var err error
dep, err = addCoreDNSServerAnnotation(dep, dnsZoneName, dnsProviderConfig)
if err != nil {
return nil, err
}
}
}
if dryRun {
return dep, nil
}
return clientset.Extensions().Deployments(namespace).Create(dep)
}
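// marshallOverrides parses a comma separated list of "--arg=value" override
// flags into a map from argument name to value.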
func marshallOverrides(overrideArgString string) (map[string]string, error) {
if overrideArgString == "" {
return nil, nil
}
argsMap := make(map[string]string)
overrideArgs := strings.Split(overrideArgString, ",")
for _, overrideArg := range overrideArgs {
splitArg := strings.SplitN(overrideArg, "=", 2)
if len(splitArg) != 2 {
return nil, fmt.Errorf("wrong format for override arg: %s", overrideArg)
}
key := strings.TrimSpace(splitArg[0])
val := strings.TrimSpace(splitArg[1])
if len(key) == 0 {
return nil, fmt.Errorf("wrong format for override arg: %s, arg name cannot be empty", overrideArg)
}
argsMap[key] = val
}
return argsMap, nil
}
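// argMapsToArgStrings merges the override map into the default argument map
// and renders the result as a sorted list of "key=value" strings.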
func argMapsToArgStrings(argsMap, overrides map[string]string) []string {
for key, val := range overrides {
argsMap[key] = val
}
args := []string{}
for key, value := range argsMap {
args = append(args, fmt.Sprintf("%s=%s", key, value))
}
// This is needed for the unit test deep copy to get an exact match
sort.Strings(args)
return args
}
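// waitForPods polls the federation system namespace until every named
// federation control plane pod reaches the Running phase.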
func waitForPods(cmdOut io.Writer, clientset client.Interface, fedPods []string, namespace string) error {
err := wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
podCheck := len(fedPods)
podList, err := clientset.Core().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
return false, nil
}
for _, pod := range podList.Items {
for _, fedPod := range fedPods {
if strings.HasPrefix(pod.Name, fedPod) && pod.Status.Phase == "Running" {
					podCheck--
}
}
			// Ensure that all pods are in the Running state, or keep waiting.
if podCheck == 0 {
return true, nil
}
}
return false, nil
})
return err
}
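// waitSrvHealthy polls the federation API server's /healthz endpoint until
// it reports ok.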
func waitSrvHealthy(cmdOut io.Writer, config util.AdminConfig, context, kubeconfig string) error {
fedClientSet, err := config.FederationClientset(context, kubeconfig)
if err != nil {
return err
}
fedDiscoveryClient := fedClientSet.Discovery()
err = wait.PollInfinite(podWaitInterval, func() (bool, error) {
fmt.Fprint(cmdOut, ".")
body, err := fedDiscoveryClient.RESTClient().Get().AbsPath("/healthz").Do().Raw()
if err != nil {
return false, nil
}
if strings.EqualFold(string(body), "ok") {
return true, nil
}
return false, nil
})
return err
}
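// printSuccess reports the endpoints at which the federation API server is
// reachable, including the node port for NodePort services.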
func printSuccess(cmdOut io.Writer, ips, hostnames []string, svc *api.Service) error {
svcEndpoints := append(ips, hostnames...)
endpoints := strings.Join(svcEndpoints, ", ")
if svc.Spec.Type == api.ServiceTypeNodePort {
endpoints = ips[0] + ":" + strconv.Itoa(int(svc.Spec.Ports[0].NodePort))
if len(ips) > 1 {
endpoints = endpoints + ", ..."
}
}
_, err := fmt.Fprintf(cmdOut, "Federation API server is running at: %s\n", endpoints)
return err
}
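// updateKubeconfig records the new federation's API server endpoint,
// credentials, and context in the user's kubeconfig file.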
func updateKubeconfig(config util.AdminConfig, name, endpoint, kubeConfigPath string, credentials *credentials, dryRun bool) error {
po := config.PathOptions()
po.LoadingRules.ExplicitPath = kubeConfigPath
kubeconfig, err := po.GetStartingConfig()
if err != nil {
return err
}
// Populate API server endpoint info.
cluster := clientcmdapi.NewCluster()
// Prefix "https" as the URL scheme to endpoint.
if !strings.HasPrefix(endpoint, "https://") {
endpoint = fmt.Sprintf("https://%s", endpoint)
}
cluster.Server = endpoint
cluster.CertificateAuthorityData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.ca.Cert)
// Populate credentials.
authInfo := clientcmdapi.NewAuthInfo()
authInfo.ClientCertificateData = certutil.EncodeCertPEM(credentials.certEntKeyPairs.admin.Cert)
authInfo.ClientKeyData = certutil.EncodePrivateKeyPEM(credentials.certEntKeyPairs.admin.Key)
authInfo.Token = credentials.token
var httpBasicAuthInfo *clientcmdapi.AuthInfo
if credentials.password != "" {
httpBasicAuthInfo = clientcmdapi.NewAuthInfo()
httpBasicAuthInfo.Password = credentials.password
httpBasicAuthInfo.Username = credentials.username
}
// Populate context.
context := clientcmdapi.NewContext()
context.Cluster = name
context.AuthInfo = name
// Update the config struct with API server endpoint info,
// credentials and context.
kubeconfig.Clusters[name] = cluster
kubeconfig.AuthInfos[name] = authInfo
if httpBasicAuthInfo != nil {
kubeconfig.AuthInfos[fmt.Sprintf("%s-basic-auth", name)] = httpBasicAuthInfo
}
kubeconfig.Contexts[name] = context
if !dryRun {
		// Write the updated kubeconfig.
if err := clientcmd.ModifyConfig(po, *kubeconfig, true); err != nil {
return err
}
}
return nil
}
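// createDNSProviderConfigSecret stores the DNS provider configuration, when
// one was supplied, as a secret in the federation system namespace.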
func createDNSProviderConfigSecret(clientset client.Interface, namespace, name, federationName string, dnsProviderConfigBytes []byte, dryRun bool) (*api.Secret, error) {
if dnsProviderConfigBytes == nil {
return nil, nil
}
secretSpec := &api.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{federation.FederationNameAnnotation: federationName},
},
Data: map[string][]byte{
name: dnsProviderConfigBytes,
},
}
var secret *api.Secret
var err error
if !dryRun {
secret, err = clientset.Core().Secrets(namespace).Create(secretSpec)
if err != nil {
return nil, err
}
}
return secret, nil
}
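// addDNSProviderConfig mounts the DNS provider config secret into the
// controller manager container and points its --dns-provider-config flag at
// the mounted file.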
func addDNSProviderConfig(dep *extensions.Deployment, secretName string) *extensions.Deployment {
const (
dnsProviderConfigVolume = "config-volume"
dnsProviderConfigMountPath = "/etc/federation/dns-provider"
)
// Create a volume from dns-provider secret
volume := api.Volume{
Name: dnsProviderConfigVolume,
VolumeSource: api.VolumeSource{
Secret: &api.SecretVolumeSource{
SecretName: secretName,
},
},
}
dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, volume)
// Mount dns-provider secret volume to controller-manager container
volumeMount := api.VolumeMount{
Name: dnsProviderConfigVolume,
MountPath: dnsProviderConfigMountPath,
ReadOnly: true,
}
dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(dep.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
dep.Spec.Template.Spec.Containers[0].Command = append(dep.Spec.Template.Spec.Containers[0].Command, fmt.Sprintf("--dns-provider-config=%s/%s", dnsProviderConfigMountPath, secretName))
return dep
}
// authFileContents returns a CSV string containing the contents of an
// authentication file in the format required by the federation-apiserver.
func authFileContents(username, authSecret string) []byte {
return []byte(fmt.Sprintf("%s,%s,%s\n", authSecret, username, uuid.NewUUID()))
}
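// addCoreDNSServerAnnotation records the DNS zone name, the CoreDNS
// endpoints read from the provider config, and the provider type as
// deployment annotations so that joining clusters can configure kube-dns
// accordingly.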
func addCoreDNSServerAnnotation(deployment *extensions.Deployment, dnsZoneName, dnsProviderConfig string) (*extensions.Deployment, error) {
var cfg coredns.Config
if err := gcfg.ReadFileInto(&cfg, dnsProviderConfig); err != nil {
return nil, err
}
deployment.Annotations[util.FedDNSZoneName] = dnsZoneName
deployment.Annotations[util.FedNameServer] = cfg.Global.CoreDNSEndpoints
deployment.Annotations[util.FedDNSProvider] = util.FedDNSProviderCoreDNS
return deployment, nil
}
| federation/pkg/kubefed/init/init.go | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.9974064230918884,
0.022189630195498466,
0.0001621949631953612,
0.00027356736245565116,
0.12966738641262054
] |
{
"id": 8,
"code_window": [
"\tapiServerEnableTokenAuth bool\n",
"}\n",
"\n",
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {\n",
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\n",
"\tflags.StringVar(&o.dnsZoneName, \"dns-zone-name\", \"\", \"DNS suffix for this federation. Federated Service DNS names are published with this suffix.\")\n",
"\tflags.StringVar(&o.image, \"image\", defaultImage, \"Image to use for federation API server and controller manager binaries.\")\n",
"\tflags.StringVar(&o.dnsProvider, \"dns-provider\", \"\", \"Dns provider to be used for this deployment.\")\n",
"\tflags.StringVar(&o.dnsProviderConfig, \"dns-provider-config\", \"\", \"Config file path on local file system for configuring DNS provider.\")\n",
"\tflags.StringVar(&o.etcdPVCapacity, \"etcd-pv-capacity\", \"10Gi\", \"Size of persistent volume claim to be used for etcd.\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet, defaultImage string) {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 156
} | # OAI object model [](https://ci.vmware.run/go-openapi/spec) [](https://coverage.vmware.run/go-openapi/spec) [](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [](http://godoc.org/github.com/go-openapi/spec)
The object model for OpenAPI specification documents | vendor/github.com/go-openapi/spec/README.md | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00016925590171013027,
0.00016925590171013027,
0.00016925590171013027,
0.00016925590171013027,
0
] |
{
"id": 8,
"code_window": [
"\tapiServerEnableTokenAuth bool\n",
"}\n",
"\n",
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {\n",
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\n",
"\tflags.StringVar(&o.dnsZoneName, \"dns-zone-name\", \"\", \"DNS suffix for this federation. Federated Service DNS names are published with this suffix.\")\n",
"\tflags.StringVar(&o.image, \"image\", defaultImage, \"Image to use for federation API server and controller manager binaries.\")\n",
"\tflags.StringVar(&o.dnsProvider, \"dns-provider\", \"\", \"Dns provider to be used for this deployment.\")\n",
"\tflags.StringVar(&o.dnsProviderConfig, \"dns-provider-config\", \"\", \"Config file path on local file system for configuring DNS provider.\")\n",
"\tflags.StringVar(&o.etcdPVCapacity, \"etcd-pv-capacity\", \"10Gi\", \"Size of persistent volume claim to be used for etcd.\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet, defaultImage string) {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 156
} | #!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
make -C "${KUBE_ROOT}" WHAT=cmd/hyperkube
# Add other BADSYMBOLS here.
BADSYMBOLS=(
"httptest"
"testify"
"testing[.]"
)
# Because hyperkube links everything in, simply check its symbol table for bad symbols.
SYMBOLS="$(nm ${KUBE_OUTPUT_HOSTBIN}/hyperkube)"
RESULT=0
for BADSYMBOL in "${BADSYMBOLS[@]}"; do
if FOUND=$(echo "$SYMBOLS" | grep "$BADSYMBOL"); then
echo "Found bad symbol '${BADSYMBOL}':"
echo "$FOUND"
RESULT=1
fi
done
exit $RESULT
# ex: ts=2 sw=2 et filetype=sh
| hack/verify-symbols.sh | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00025096655008383095,
0.00018756455392576754,
0.00016870374383870512,
0.00017369065608363599,
0.00003176065001753159
] |
{
"id": 8,
"code_window": [
"\tapiServerEnableTokenAuth bool\n",
"}\n",
"\n",
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {\n",
"\tdefaultImage := fmt.Sprintf(\"%s:%s\", hyperkubeImageName, version.Get())\n",
"\n",
"\tflags.StringVar(&o.dnsZoneName, \"dns-zone-name\", \"\", \"DNS suffix for this federation. Federated Service DNS names are published with this suffix.\")\n",
"\tflags.StringVar(&o.image, \"image\", defaultImage, \"Image to use for federation API server and controller manager binaries.\")\n",
"\tflags.StringVar(&o.dnsProvider, \"dns-provider\", \"\", \"Dns provider to be used for this deployment.\")\n",
"\tflags.StringVar(&o.dnsProviderConfig, \"dns-provider-config\", \"\", \"Config file path on local file system for configuring DNS provider.\")\n",
"\tflags.StringVar(&o.etcdPVCapacity, \"etcd-pv-capacity\", \"10Gi\", \"Size of persistent volume claim to be used for etcd.\")\n"
],
"labels": [
"keep",
"keep",
"keep",
"replace",
"replace",
"replace",
"keep",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func (o *initFederationOptions) Bind(flags *pflag.FlagSet, defaultImage string) {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 156
} | package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
// Changes may cause incorrect behavior and will be lost if the code is
// regenerated.
import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"net/http"
)
// VirtualNetworksClient is the client for the Microsoft Azure Network
// management API, which provides a RESTful set of web services that interact
// with the Microsoft Azure Networks service to manage your network resources.
// The API has entities that capture the relationship between an end user and
// the Microsoft Azure Networks service.
type VirtualNetworksClient struct {
ManagementClient
}
// NewVirtualNetworksClient creates an instance of the VirtualNetworksClient
// client.
func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient {
return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewVirtualNetworksClientWithBaseURI creates an instance of the
// VirtualNetworksClient client.
func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient {
return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// CheckIPAddressAvailability checks whether a private IP address is available
// for use.
//
// resourceGroupName is the name of the resource group. virtualNetworkName is
// the name of the virtual network. ipAddress is the private IP address to be
// verified.
func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, ipAddress string) (result IPAddressAvailabilityResult, err error) {
req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, ipAddress)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", nil, "Failure preparing request")
}
resp, err := client.CheckIPAddressAvailabilitySender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure sending request")
}
result, err = client.CheckIPAddressAvailabilityResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure responding to request")
}
return
}
// CheckIPAddressAvailabilityPreparer prepares the CheckIPAddressAvailability request.
func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceGroupName string, virtualNetworkName string, ipAddress string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
if len(ipAddress) > 0 {
queryParameters["ipAddress"] = autorest.Encode("query", ipAddress)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// CheckIPAddressAvailabilitySender sends the CheckIPAddressAvailability request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualNetworksClient) CheckIPAddressAvailabilitySender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// CheckIPAddressAvailabilityResponder handles the response to the CheckIPAddressAvailability request. The method always
// closes the http.Response Body.
func (client VirtualNetworksClient) CheckIPAddressAvailabilityResponder(resp *http.Response) (result IPAddressAvailabilityResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// CreateOrUpdate creates or updates a virtual network in the specified
// resource group. This method may poll for completion. Polling can be
// canceled by passing the cancel channel argument. The channel will be used
// to cancel polling and any outstanding HTTP requests.
//
// resourceGroupName is the name of the resource group. virtualNetworkName is
// the name of the virtual network. parameters is parameters supplied to the
// create or update virtual network operation
func (client VirtualNetworksClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (result autorest.Response, err error) {
req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, parameters, cancel)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", nil, "Failure preparing request")
}
resp, err := client.CreateOrUpdateSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure sending request")
}
result, err = client.CreateOrUpdateResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CreateOrUpdate", resp, "Failure responding to request")
}
return
}
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, parameters VirtualNetwork, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsJSON(),
autorest.AsPut(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
// closes the http.Response Body.
func (client VirtualNetworksClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByClosing())
result.Response = resp
return
}
// Delete deletes the specified virtual network. This method may poll for
// completion. Polling can be canceled by passing the cancel channel
// argument. The channel will be used to cancel polling and any outstanding
// HTTP requests.
//
// resourceGroupName is the name of the resource group. virtualNetworkName is
// the name of the virtual network.
func (client VirtualNetworksClient) Delete(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (result autorest.Response, err error) {
req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, cancel)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", nil, "Failure preparing request")
}
resp, err := client.DeleteSender(req)
if err != nil {
result.Response = resp
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure sending request")
}
result, err = client.DeleteResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Delete", resp, "Failure responding to request")
}
return
}
// DeletePreparer prepares the Delete request.
func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, cancel <-chan struct{}) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsDelete(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{Cancel: cancel})
}
// DeleteSender sends the Delete request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client,
req,
azure.DoPollForAsynchronous(client.PollingDelay))
}
// DeleteResponder handles the response to the Delete request. The method always
// closes the http.Response Body.
func (client VirtualNetworksClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusAccepted, http.StatusNoContent, http.StatusOK),
autorest.ByClosing())
result.Response = resp
return
}
// Get gets the specified virtual network by resource group.
//
// resourceGroupName is the name of the resource group. virtualNetworkName is
// the name of the virtual network. expand is expands referenced resources.
func (client VirtualNetworksClient) Get(resourceGroupName string, virtualNetworkName string, expand string) (result VirtualNetwork, err error) {
req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, expand)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", nil, "Failure preparing request")
}
resp, err := client.GetSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure sending request")
}
result, err = client.GetResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "Get", resp, "Failure responding to request")
}
return
}
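// Usage sketch (hypothetical names; "subnets/ipConfigurations" is assumed to
// be an expand value the service accepts, sent as the $expand query parameter):
//
//	vnet, err := client.Get("myResourceGroup", "myVNet", "subnets/ipConfigurations")
//	_ = vnet // expanded subnet IP configurations are inlined in the result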
// GetPreparer prepares the Get request.
func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtualNetworkName string, expand string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
"virtualNetworkName": autorest.Encode("path", virtualNetworkName),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// GetSender sends the Get request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// GetResponder handles the response to the Get request. The method always
// closes the http.Response Body.
func (client VirtualNetworksClient) GetResponder(resp *http.Response) (result VirtualNetwork, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// List gets all virtual networks in a resource group.
//
// resourceGroupName is the name of the resource group.
func (client VirtualNetworksClient) List(resourceGroupName string) (result VirtualNetworkListResult, err error) {
req, err := client.ListPreparer(resourceGroupName)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing request")
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to request")
}
return
}
// ListPreparer prepares the List request.
func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client VirtualNetworksClient) ListResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListNextResults retrieves the next set of results, if any.
func (client VirtualNetworksClient) ListNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) {
req, err := lastResults.VirtualNetworkListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "List", resp, "Failure responding to next results request")
}
return
}
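// Paging sketch (editor's illustration; the Value and NextLink fields are
// assumed from this SDK's list-result shape, and error handling is elided):
//
//	page, _ := client.List("myResourceGroup")
//	vnets := append([]VirtualNetwork{}, *page.Value...)
//	for page.NextLink != nil && len(*page.NextLink) > 0 {
//		page, _ = client.ListNextResults(page)
//		if page.Value != nil {
//			vnets = append(vnets, *page.Value...)
//		}
//	}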
// ListAll gets all virtual networks in a subscription.
func (client VirtualNetworksClient) ListAll() (result VirtualNetworkListResult, err error) {
req, err := client.ListAllPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing request")
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to request")
}
return
}
// ListAllPreparer prepares the ListAll request.
func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) {
pathParameters := map[string]interface{}{
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
queryParameters := map[string]interface{}{
"api-version": client.APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare(&http.Request{})
}
// ListAllSender sends the ListAll request. The method will close the
// http.Response Body if it receives an error.
func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) {
return autorest.SendWithSender(client, req)
}
// ListAllResponder handles the response to the ListAll request. The method always
// closes the http.Response Body.
func (client VirtualNetworksClient) ListAllResponder(resp *http.Response) (result VirtualNetworkListResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// ListAllNextResults retrieves the next set of results, if any.
func (client VirtualNetworksClient) ListAllNextResults(lastResults VirtualNetworkListResult) (result VirtualNetworkListResult, err error) {
req, err := lastResults.VirtualNetworkListResultPreparer()
if err != nil {
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListAllSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure sending next results request")
}
result, err = client.ListAllResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "ListAll", resp, "Failure responding to next results request")
}
return
}
| vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.0004423217324074358, 0.00017591840878594667, 0.00016348699864465743, 0.00016838072042446584, 0.000039825514249969274] |
{
"id": 9,
"code_window": [
"\tflags.BoolVar(&o.apiServerEnableTokenAuth, \"apiserver-enable-token-auth\", false, \"Enables token authentication for the federation-apiserver. Defaults to false.\")\n",
"}\n",
"\n",
"// NewCmdInit defines the `init` command that bootstraps a federation\n",
"// control plane inside a set of host clusters.\n",
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {\n",
"\topts := &initFederation{}\n",
"\n",
"\tcmd := &cobra.Command{\n",
"\t\tUse: \"init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig, defaultImage string) *cobra.Command {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 176
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["kubefed.go"],
tags = ["automanaged"],
deps = [
"//federation/pkg/kubefed:go_default_library",
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/version/prometheus:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)
| federation/cmd/kubefed/app/BUILD | 1 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.00017460659728385508, 0.00017187200137414038, 0.00016972926096059382, 0.0001715760736260563, 0.0000019011605445484747] |
{
"id": 9,
"code_window": [
"\tflags.BoolVar(&o.apiServerEnableTokenAuth, \"apiserver-enable-token-auth\", false, \"Enables token authentication for the federation-apiserver. Defaults to false.\")\n",
"}\n",
"\n",
"// NewCmdInit defines the `init` command that bootstraps a federation\n",
"// control plane inside a set of host clusters.\n",
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {\n",
"\topts := &initFederation{}\n",
"\n",
"\tcmd := &cobra.Command{\n",
"\t\tUse: \"init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig, defaultImage string) *cobra.Command {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 176
} | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
package v1
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions/internalinterfaces"
v1 "k8s.io/kubernetes/pkg/client/listers/core/v1"
time "time"
)
// NodeInformer provides access to a shared informer and lister for
// Nodes.
type NodeInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.NodeLister
}
type nodeInformer struct {
factory internalinterfaces.SharedInformerFactory
}
func newNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
return client.CoreV1().Nodes().List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
return client.CoreV1().Nodes().Watch(options)
},
},
&api_v1.Node{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return sharedIndexInformer
}
func (f *nodeInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&api_v1.Node{}, newNodeInformer)
}
func (f *nodeInformer) Lister() v1.NodeLister {
return v1.NewNodeLister(f.Informer().GetIndexer())
}
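// Usage sketch (editor's illustration; the factory wiring below assumes the
// generated externalversions package and is not part of this file):
//
//	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
//	nodes := factory.Core().V1().Nodes()
//	go nodes.Informer().Run(stopCh) // or factory.Start(stopCh)
//	cache.WaitForCacheSync(stopCh, nodes.Informer().HasSynced)
//	list, err := nodes.Lister().List(labels.Everything())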
| pkg/client/informers/informers_generated/externalversions/core/v1/node.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.0001778147998265922, 0.000172210275195539, 0.00016659240645822138, 0.0001713815436232835, 0.0000035619730169855757] |
{
"id": 9,
"code_window": [
"\tflags.BoolVar(&o.apiServerEnableTokenAuth, \"apiserver-enable-token-auth\", false, \"Enables token authentication for the federation-apiserver. Defaults to false.\")\n",
"}\n",
"\n",
"// NewCmdInit defines the `init` command that bootstraps a federation\n",
"// control plane inside a set of host clusters.\n",
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {\n",
"\topts := &initFederation{}\n",
"\n",
"\tcmd := &cobra.Command{\n",
"\t\tUse: \"init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig, defaultImage string) *cobra.Command {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 176
} | package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["doc.go"],
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/api/meta/metatypes:all-srcs",
],
tags = ["automanaged"],
)
| pkg/api/meta/BUILD | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [0.00017526747251395136, 0.0001739592698868364, 0.0001726280024740845, 0.00017397082410752773, 9.700235068521579e-7] |
{
"id": 9,
"code_window": [
"\tflags.BoolVar(&o.apiServerEnableTokenAuth, \"apiserver-enable-token-auth\", false, \"Enables token authentication for the federation-apiserver. Defaults to false.\")\n",
"}\n",
"\n",
"// NewCmdInit defines the `init` command that bootstraps a federation\n",
"// control plane inside a set of host clusters.\n",
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {\n",
"\topts := &initFederation{}\n",
"\n",
"\tcmd := &cobra.Command{\n",
"\t\tUse: \"init FEDERATION_NAME --host-cluster-context=HOST_CONTEXT\",\n"
],
"labels": [
"keep",
"keep",
"keep",
"keep",
"keep",
"replace",
"keep",
"keep",
"keep",
"keep"
],
"after_edit": [
"func NewCmdInit(cmdOut io.Writer, config util.AdminConfig, defaultImage string) *cobra.Command {\n"
],
"file_path": "federation/pkg/kubefed/init/init.go",
"type": "replace",
"edit_start_line_idx": 176
} | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
)
func addDefaultingFuncs(scheme *runtime.Scheme) error {
return RegisterDefaults(scheme)
}
| staging/src/k8s.io/client-go/pkg/apis/authentication/v1beta1/defaults.go | 0 | https://github.com/kubernetes/kubernetes/commit/b4381d0c4473c8c8266950d8c0ed60b334361de3 | [
0.00017703082994557917,
0.00017371548165101558,
0.00016745894390624017,
0.00017665668565314263,
0.000004426679424796021
] |